1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * Copyright (C) Paul Mackerras 1997. |
4 | * |
5 | * NOTE: this code runs in 32 bit mode and is packaged as ELF32. |
6 | */ |
7 | |
8 | #include "ppc_asm.h" |
9 | |
10 | .text |
/*
 * char *strcpy(char *dest, const char *src)
 * Copies src including its terminating NUL.  Returns dest
 * (r3 is never modified).
 */
	.globl	strcpy
strcpy:
	addi	r5,r3,-1	/* bias pointers for lbzu/stbu pre-increment */
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)	/* fetch next source byte */
	cmpwi	0,r0,0		/* is it the terminating NUL? */
	stbu	r0,1(r5)	/* store it (the NUL is copied too) */
	bne	1b
	blr
20 | |
/*
 * char *strncpy(char *dest, const char *src, size_t n)
 * Copies at most n bytes, stopping after the terminating NUL.
 * NOTE(review): unlike ISO C strncpy, dest is NOT zero-padded out
 * to n bytes when src is shorter -- confirm no caller relies on
 * padding.  Returns dest (r3 is never modified).
 */
	.globl	strncpy
strncpy:
	cmpwi	0,r5,0		/* n == 0: nothing to copy */
	beqlr
	mtctr	r5		/* ctr = remaining byte budget */
	addi	r6,r3,-1	/* bias pointers for lbzu/stbu */
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0		/* cr0.eq set on the NUL byte */
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	blr
33 | |
/*
 * char *strcat(char *dest, const char *src)
 * Appends src (including its NUL) after the existing contents of
 * dest.  Returns dest (r3 is never modified).
 */
	.globl	strcat
strcat:
	addi	r5,r3,-1	/* bias for lbzu */
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)	/* scan dest for its terminating NUL */
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1	/* back up so the store overwrites the NUL */
1:	lbzu	r0,1(r4)	/* then copy src exactly like strcpy */
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr
47 | |
/*
 * char *strchr(const char *s, int c)
 * Returns a pointer to the first occurrence of c in s, or NULL.
 * The terminating NUL counts as part of the string, so c == 0
 * finds the terminator.
 * NOTE(review): full 32-bit compare against r4 -- assumes callers
 * pass c already reduced to unsigned-char range; confirm.
 */
	.globl	strchr
strchr:
	addi	r3,r3,-1	/* bias for lbzu; r3 doubles as the cursor */
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4		/* match? r3 already points at the byte */
	beqlr
	cmpwi	0,r0,0		/* end of string? */
	bne	1b
	li	r3,0		/* not found: return NULL */
	blr
58 | |
/*
 * int strcmp(const char *s1, const char *s2)
 * Returns 0 if the strings are equal, otherwise the (signed)
 * difference of the first differing byte pair.
 * cr1 tracks "s1 byte == NUL"; cr0 tracks the byte difference.
 */
	.globl	strcmp
strcmp:
	addi	r5,r3,-1	/* r3 is reused as scratch/return below */
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)	/* next byte of s1 */
	cmpwi	1,r3,0		/* cr1: did s1 just end? */
	lbzu	r0,1(r4)	/* next byte of s2 */
	subf.	r3,r0,r3	/* r3 = byte1 - byte2, sets cr0 */
	beqlr	1		/* s1 ended: return diff (0 if s2 ended too) */
	beq	1b		/* bytes equal and not NUL: keep scanning */
	blr			/* mismatch: return nonzero difference */
70 | |
/*
 * int strncmp(const char *s1, const char *s2, size_t n)
 * Compares at most n bytes; returns 0 if equal, otherwise the
 * (signed) difference of the first differing byte pair.
 * cr1 tracks "s1 byte == NUL"; cr0 tracks the byte difference.
 *
 * Fix: guard n == 0.  Previously `mtctr 0` followed by bdnzt
 * wrapped the count register, so strncmp(a, b, 0) compared up to
 * 2^32 bytes (i.e. behaved like strcmp) instead of returning 0
 * as ISO C requires.
 */
	.globl	strncmp
strncmp:
	cmpwi	0,r5,0		/* n == 0: strings compare equal */
	beq	2f
	mtctr	r5		/* ctr = bytes remaining */
	addi	r5,r3,-1	/* r3 is reused as scratch/return below */
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)	/* next byte of s1 */
	cmpwi	1,r3,0		/* cr1: did s1 just end? */
	lbzu	r0,1(r4)	/* next byte of s2 */
	subf.	r3,r0,r3	/* r3 = byte1 - byte2, sets cr0 */
	beqlr	1		/* s1 ended: return diff (0 if s2 ended too) */
	bdnzt	eq,1b		/* dec ctr; continue while bytes equal */
	blr			/* r3 = 0 (budget exhausted) or difference */
2:	li	r3,0
	blr
83 | |
/*
 * size_t strlen(const char *s)
 * Returns the number of bytes before the terminating NUL.
 */
	.globl	strlen
strlen:
	addi	r4,r3,-1	/* bias cursor for lbzu */
1:	lbzu	r0,1(r4)	/* scan forward for the NUL */
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4	/* length = &NUL - s */
	blr
92 | |
/*
 * void *memset(void *s, int c, size_t n)
 * Returns s (r3 is never modified).
 * Replicates the fill byte into all four bytes of r4, then fills
 * word-at-a-time.  The first word store may be unaligned; the
 * pointer is then rounded down to a word boundary and the count
 * rebased, since that first store already covered the unaligned
 * head bytes.
 */
	.globl	memset
memset:
	rlwimi	r4,r4,8,16,23	/* replicate low byte: c -> 0x0000cccc */
	rlwimi	r4,r4,16,0,15	/* ... -> 0xcccccccc */
	addi	r6,r3,-4	/* r6 = s - 4, biased for stwu */
	cmplwi	0,r5,4
	blt	7f		/* n < 4: byte loop only */
	stwu	r4,4(r6)	/* store first word at s (may be unaligned) */
	beqlr			/* n == 4 exactly: done (cr0 from cmplwi) */
	andi.	r0,r6,3		/* r0 = misalignment of s */
	add	r5,r0,r5	/* rebase count to the aligned word base; */
	subf	r6,r0,r6	/* ...round r6 down to a word boundary */
	rlwinm	r0,r5,32-2,2,31	/* r0 = word count = r5 >> 2 */
	mtctr	r0
	bdz	6f		/* first word already stored: do count-1 more */
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3		/* r5 = trailing bytes (< 4) */
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3		/* re-bias for stbu */
8:	stbu	r4,1(r6)	/* only the low byte of r4 (== c) is stored */
	bdnz	8b
	blr
118 | |
/*
 * void *memmove(void *dest, const void *src, size_t n)
 * Overlap-safe copy: copies descending via backwards_memcpy when
 * dest > src, otherwise falls straight through into memcpy's
 * ascending copy (safe when dest <= src).  Returns dest.
 */
	.globl	memmove
memmove:
	cmplw	0,r3,r4		/* unsigned compare: dest vs src */
	bgt	backwards_memcpy
	/* fall through */
124 | |
/*
 * void *memcpy(void *dest, const void *src, size_t n)
 * Ascending copy; also the fall-through tail of memmove.
 * Returns dest (r3 is never modified).
 * Copies two words (8 bytes) per loop iteration when both pointers
 * are word-aligned, otherwise byte-by-byte.
 */
	.globl	memcpy
memcpy:
	rlwinm.	r7,r5,32-3,3,31	/* r7 = r5 >> 3 */
	addi	r6,r3,-4	/* bias pointers for lwzu/stwu */
	addi	r4,r4,-4
	beq	3f		/* if less than 8 bytes to do */
	andi.	r0,r6,3		/* get dest word aligned */
	mtctr	r7		/* ctr = number of 8-byte chunks */
	bne	5f
	andi.	r0,r4,3		/* check src word aligned too */
	bne	3f		/* mixed alignment: bytes only */
1:	lwz	r7,4(r4)	/* main loop: two words per iteration */
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7		/* r5 = tail bytes (< 8) */
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)	/* copy one more whole word of the tail */
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0		/* byte loop: tail and unaligned fallback */
	beqlr
	mtctr	r5
	addi	r4,r4,3		/* re-bias pointers for lbzu/stbu */
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4		/* r0 = head bytes needed to align dest (1..3) */
	cmpw	cr1,r0,r5
	add	r7,r0,r4
	andi.	r7,r7,3		/* will source be word-aligned too? */
	ble	cr1,3b		/* NOTE(review): r5 >= 8 and r0 <= 3 here, so this
				 * appears always taken, making the alignment
				 * fix-up below unreachable -- perf only, the
				 * byte path is still correct; confirm intent */
	bne	3b		/* do byte-by-byte if not */
	mtctr	r0
6:	lbz	r7,4(r4)	/* copy r0 head bytes to align dest */
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5	/* account for the head bytes */
	rlwinm.	r7,r5,32-3,3,31	/* recompute 8-byte chunk count */
	beq	2b
	mtctr	r7
	b	1b
173 | |
/*
 * void *backwards_memcpy(void *dest, const void *src, size_t n)
 * Descending copy from the end of both buffers -- used by memmove
 * when dest > src (overlapping regions).  Returns dest (r3 is
 * never modified).  Mirror image of memcpy: two words per loop
 * iteration when both end pointers are word-aligned.
 */
	.globl	backwards_memcpy
backwards_memcpy:
	rlwinm.	r7,r5,32-3,3,31	/* r7 = r5 >> 3 */
	add	r6,r3,r5	/* r6 = one past end of dest */
	add	r4,r4,r5	/* r4 = one past end of src */
	beq	3f		/* less than 8 bytes: bytes only */
	andi.	r0,r6,3		/* dest end word-aligned? */
	mtctr	r7		/* ctr = number of 8-byte chunks */
	bne	5f
	andi.	r0,r4,3		/* src end word-aligned too? */
	bne	3f
1:	lwz	r7,-4(r4)	/* main loop: two words per iteration */
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7		/* r5 = tail bytes (< 8) */
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)	/* copy one more whole word */
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0		/* byte loop: tail and unaligned fallback */
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	cmpw	cr1,r0,r5	/* r0 = bytes to strip to align the end (1..3) */
	subf	r7,r0,r4
	andi.	r7,r7,3		/* would src end be word-aligned too? */
	ble	cr1,3b		/* NOTE(review): r5 >= 8 and r0 <= 3 here, so this
				 * appears always taken, making the alignment
				 * fix-up below unreachable -- perf only, the
				 * byte path is still correct; confirm intent */
	bne	3b		/* byte-by-byte if not */
	mtctr	r0
6:	lbzu	r7,-1(r4)	/* copy r0 bytes from the top to align */
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5	/* account for the stripped bytes */
	rlwinm.	r7,r5,32-3,3,31	/* recompute 8-byte chunk count */
	beq	2b
	mtctr	r7
	b	1b
217 | |
/*
 * void *memchr(const void *s, int c, size_t n)
 * Returns a pointer to the first byte equal to c within the first
 * n bytes, or NULL if there is none.
 * NOTE(review): full 32-bit compare against r4 -- assumes callers
 * pass c already reduced to unsigned-char range; confirm.
 *
 * Fix: n <= 0 previously hit `blelr`, returning the unmodified s
 * pointer in r3; ISO C requires NULL when the byte is not found,
 * so route the empty range to the NULL exit instead.
 */
	.globl	memchr
memchr:
	cmpwi	0,r5,0		/* n <= 0: no bytes to search */
	ble	2f
	mtctr	r5		/* ctr = bytes remaining */
	addi	r3,r3,-1	/* bias cursor for lbzu */
1:	lbzu	r0,1(r3)
	cmpw	r0,r4		/* match? r3 already points at the byte */
	beqlr
	bdnz	1b
2:	li	r3,0		/* not found: return NULL */
	blr
230 | |
/*
 * int memcmp(const void *s1, const void *s2, size_t n)
 * Returns 0 when n <= 0 or the regions are equal, otherwise the
 * (signed) difference of the first differing byte pair.
 */
	.globl	memcmp
memcmp:
	cmpwi	0,r5,0
	ble	2f		/* n <= 0: regions compare equal */
	mtctr	r5		/* ctr = bytes remaining */
	addi	r6,r3,-1	/* r3 is reused as scratch/return below */
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3	/* r3 = byte1 - byte2, sets cr0 */
	bdnzt	2,1b		/* dec ctr; loop while equal and bytes remain */
	blr			/* r3 = 0 or the first difference */
2:	li	r3,0
	blr
245 | |
246 | |
247 | /* |
248 | * Flush the dcache and invalidate the icache for a range of addresses. |
249 | * |
250 | * flush_cache(addr, len) |
251 | */ |
252 | .global flush_cache |
253 | flush_cache: |
254 | addi 4,4,0x1f /* len = (len + 0x1f) / 0x20 */ |
255 | rlwinm. 4,4,27,5,31 |
256 | mtctr 4 |
257 | beqlr |
258 | 1: dcbf 0,3 |
259 | icbi 0,3 |
260 | addi 3,3,0x20 |
261 | bdnz 1b |
262 | sync |
263 | isync |
264 | blr |
265 | |
266 | |