/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

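/*
 * GART PTE format, as implied by the encode/decode macros below: physical
 * address bits [31:12] stay in place, physical address bits [39:32] are
 * stored in PTE bits [11:4], bit 1 marks the entry coherent and bit 0 valid.
 */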
#define GPTE_VALID 1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state, set on each GART wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
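	/*
	 * The next-fit search failed past next_bit: wrap around, retry from
	 * the start of the aperture and force a GART TLB flush so reused
	 * entries are not stale.
	 */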
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the I/O operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the IOMMU is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

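	/*
	 * Fill one GART PTE per physical page; the returned bus address is
	 * the start of the remapped range plus the offset within the first
	 * page.
	 */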
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
		return;

	/*
	 * This driver will not always use a GART mapping, but might have
	 * created a direct mapping instead. If that is the case there is
	 * nothing to unmap here.
	 */
	if (dma_addr < iommu_bus_base ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
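	/* Point the PTEs back at the scratch page so stray prefetches stay harmless. */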
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

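	/*
	 * Program consecutive GART PTEs so the merged chunks appear as one
	 * contiguous bus-address range, accumulated into sout.
	 */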
	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sg;
	sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not-yet-processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
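	/* Map the final run of entries that the loop left pending. */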
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged, try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = DMA_MAPPING_ERROR;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

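	/*
	 * force_iommu: remap through the GART anyway, aligning the remapped
	 * area to the buffer's allocation order (align_mask of 2^order - 1
	 * pages).
	 */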
	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
				 DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

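	/* Default to the whole aperture, or half of it when AGP also uses it. */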
	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

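	/* The aperture base register holds physical address bits [39:25]. */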
	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume = gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size>>10);

	return 0;

nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		   "falling back to iommu=soft.\n");
	return -1;
}

static const struct dma_map_ops gart_dma_ops = {
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
	.map_page = gart_map_page,
	.unmap_page = gart_unmap_page,
	.alloc = gart_alloc_coherent,
	.free = gart_free_coherent,
	.dma_supported = dma_direct_supported,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* Don't shut it down if AGP is installed. */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		 (agp_amd64_init() < 0) ||
		 (agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size = info.aper_size << 20;
	aper_base = info.aper_base;
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved = iommu_size;
	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

void __init gart_parse_options(char *p)
{
	int arg;

	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);