// SPDX-License-Identifier: GPL-2.0-only
/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>

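/*
 * Cache maintenance primitives used below (from powerpc asm/cacheflush.h):
 *   clean_dcache_range()      - write dirty lines back to memory
 *   invalidate_dcache_range() - discard lines without writing them back
 *   flush_dcache_range()      - write back, then discard
 */
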
/*
 * Make an area of memory cache-consistent for a DMA transfer.
 */
static void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
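		/* BUG() never returns, so this cannot fall through */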
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

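	/*
	 * kmap_atomic() mappings are strictly per-CPU; keep interrupts
	 * off so the mapping cannot be disturbed from interrupt context
	 * while a segment is being synced.
	 */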
	local_irq_save(flags);

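	/*
	 * Walk the buffer one page-sized segment at a time: the first
	 * segment runs from 'offset' to the end of its page, subsequent
	 * segments start at offset 0 and are at most PAGE_SIZE long.
	 */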
	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a physical address instead of a virtual address
 */
static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;

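	/*
	 * Highmem pages may lack a permanent kernel mapping and must be
	 * kmapped a segment at a time; lowmem pages can be synced through
	 * their linear-map address directly.
	 */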
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, dir);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, dir);
#endif
}

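/*
 * Hooks called by the generic streaming-DMA code: for_device before a
 * transfer hands the buffer to the device, for_cpu before the CPU
 * touches it again afterwards. Both reduce to the same cache
 * maintenance here, selected by 'dir'.
 */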
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

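/*
 * Called by the generic DMA layer on a freshly allocated coherent
 * buffer: write back and invalidate any cached lines so the CPU view
 * and the device view start out identical.
 */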
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	flush_dcache_range(kaddr, kaddr + size);
}