// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/copypage-xsc3.c
 *
 * Copyright (C) 2004 Intel Corp.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 * We don't really want write-allocate cache behaviour for these functions
 * since that will just eat through 8K of the cache.
 */
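
/*
 * The invalidate mcr ops (p15, 0, rd, c7, c6, 1) below are what give us
 * that behaviour: each destination cache line is invalidated just before
 * it is completely overwritten, so its old contents need never be
 * fetched from memory just to be thrown away.
 */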

/*
 * XSC3 optimised copy_user_highpage
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

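	/*
	 * Copy 64 bytes per loop iteration: %0 walks the destination,
	 * %1 the source, and %2 counts down from PAGE_SIZE / 64 - 1.
	 * The pld instructions keep the source prefetched a couple of
	 * 32-byte cache lines ahead of the copy.
	 */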
	asm volatile ("\
	.arch xscale					\n\
	pld	[%1, #0]				\n\
	pld	[%1, #32]				\n\
1:	pld	[%1, #64]				\n\
	pld	[%1, #96]				\n\
\n\
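	@ copy one 64-byte chunk, interleaving loads	\n\
	@ with stores to help hide load-use latency	\n\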
2:	ldrd	r2, r3, [%1], #8			\n\
	ldrd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8			\n\
	ldrd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%0], #8			\n\
	ldrd	r4, r5, [%1], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%0], #8			\n\
	ldrd	r2, r3, [%1], #8			\n\
	ldrd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8			\n\
	ldrd	r2, r3, [%1], #8			\n\
	subs	%2, %2, #1				\n\
	strd	r4, r5, [%0], #8			\n\
	ldrd	r4, r5, [%1], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%0], #8			\n\
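	@ %2 > 0: loop back to 1: and prefetch the	\n\
	@ next chunk; %2 == 0: take one final pass	\n\
	@ through 2: without prefetching beyond the	\n\
	@ end of the source page			\n\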
	bgt	1b					\n\
	beq	2b "
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5");
}

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
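	/*
	 * Flush any user-space lines for the source page out of the
	 * VIVT cache, so the reads through the kernel mapping below
	 * see current data.
	 */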
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	xsc3_mc_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * XSC3 optimised clear_user_highpage
 */
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
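
	/*
	 * Zero the page one 32-byte cache line at a time: invalidate
	 * the destination line, then store 32 bytes of zeros through
	 * it.  r1 counts down PAGE_SIZE / 32 lines.
	 */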
	asm volatile ("\
	.arch xscale					\n\
	mov	r1, %2					\n\
	mov	r2, #0					\n\
	mov	r3, #0					\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	subs	r1, r1, #1				\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3");
	kunmap_atomic(kaddr);
}

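/*
 * Installed as this core's page operations at boot; __initdata because
 * the struct is only read while the processor is being set up (it is
 * copied into the active cpu_user_fns), never afterwards.
 */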
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
};