/* Copyright (C) 1996-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

/* Copy a null-terminated string from SRC to DST.

   This is an internal routine used by strcpy, stpcpy, and strcat.
   As such, it uses special linkage conventions to make implementation
   of these public functions more efficient.

   On input:
	t9 = return address
	a0 = DST
	a1 = SRC

   On output:
	t8 = bitmask (with one bit set) indicating the last byte written
	a0 = unaligned address of the last *word* written

   Furthermore, v0, a3-a5, t11, and t12 are untouched.
*/
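
/* Illustration only (not part of the assembled code): given the output
   convention above, a caller can recover the address of the last byte
   written, i.e. the terminating null, roughly as in the C sketch below,
   where the parameters stand for the register values on return and the
   function name is made up for this example.  The real callers
   (e.g. stpcpy) compute this with a handful of Alpha instructions
   rather than a loop.

	char *
	last_byte_written (unsigned long t8, unsigned long a0)
	{
	  unsigned long idx = 0;
	  while (!(t8 & (1UL << idx)))
	    idx++;
	  return (char *) (a0 & ~7UL) + idx;
	}
*/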

/* This is generally scheduled for the EV5, but should still be pretty
   good for the EV4 too.  */

#include <sysdep.h>

	.set noat
	.set noreorder

	.text
	.type	__stxcpy, @function
	.globl	__stxcpy
	.usepv	__stxcpy, no

	cfi_startproc
	cfi_return_column (t9)

	/* On entry to this basic block:
	   t0 == the first destination word for masking back in
	   t1 == the first source word.  */
	.align 3
stxcpy_aligned:
	/* Create the 1st output word and detect 0's in the 1st input word.  */
	lda	t2, -1		# e1 : build a mask against false zero
	mskqh	t2, a1, t2	# e0 :   detection in the src word
	mskqh	t1, a1, t3	# e0 :
	ornot	t1, t2, t2	# .. e1 :
	mskql	t0, a1, t0	# e0 : assemble the first output word
	cmpbge	zero, t2, t7	# .. e1 : bits set iff null found
	or	t0, t3, t1	# e0 :
	bne	t7, $a_eos	# .. e1 :

	/* On entry to this basic block:
	   t0 == the first destination word for masking back in
	   t1 == a source word not containing a null.  */
$a_loop:
	stq_u	t1, 0(a0)	# e0 :
	addq	a0, 8, a0	# .. e1 :
	ldq_u	t1, 0(a1)	# e0 :
	addq	a1, 8, a1	# .. e1 :
	cmpbge	zero, t1, t7	# e0 (stall)
	beq	t7, $a_loop	# .. e1 (zdb)

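	/* Illustration only: the cmpbge test above finds a null byte
	   anywhere in a word without examining bytes one at a time.
	   Assuming the usual semantics of CMPBGE with a zero first
	   operand, it computes roughly this C function:

		unsigned long
		zero_byte_mask (unsigned long word)
		{
		  unsigned long mask = 0;
		  for (int i = 0; i < 8; i++)
		    if (((word >> (i * 8)) & 0xff) == 0)
		      mask |= 1UL << i;
		  return mask;
		}

	   The loop keeps storing whole aligned words while this mask
	   is zero.  */
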
	/* Take care of the final (partial) word store.
	   On entry to this basic block we have:
	   t1 == the source word containing the null
	   t7 == the cmpbge mask that found it.  */
$a_eos:
	negq	t7, t6		# e0 : find low bit set
	and	t7, t6, t8	# e1 (stall)

	/* For the sake of the cache, don't read a destination word
	   if we're not going to need it.  */
	and	t8, 0x80, t6	# e0 :
	bne	t6, 1f		# .. e1 (zdb)

	/* We're doing a partial word store and so need to combine
	   our source and original destination words.  */
	ldq_u	t0, 0(a0)	# e0 :
	subq	t8, 1, t6	# .. e1 :
	zapnot	t1, t6, t1	# e0 : clear src bytes >= null
	or	t8, t6, t7	# .. e1 :
	zap	t0, t7, t0	# e0 : clear dst bytes <= null
	or	t0, t1, t1	# e1 :

1:	stq_u	t1, 0(a0)	# e0 :
	ret	(t9)		# .. e1 :

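	/* Illustration only: in rough C, the tail store above merges source
	   and destination bytes around the null.  Here eos_mask stands for
	   the cmpbge result in t7, src/dst for the words in t1/t0, and
	   bytemask (m) for expanding bit i of m into byte i of an all-ones
	   pattern, which is what zap/zapnot implement:

		low   = eos_mask & -eos_mask;
		below = low - 1;
		out   = (src & bytemask (below)) | (dst & ~bytemask (below | low));

	   low selects the null's byte and below the bytes before it.  When
	   the null lands in the top byte (low == 0x80), the whole word comes
	   from the source and the destination load is skipped entirely.  */
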
	.align 3
__stxcpy:
	/* Are source and destination co-aligned?  */
	xor	a0, a1, t0	# e0 :
	unop			#    :
	and	t0, 7, t0	# e0 :
	bne	t0, $unaligned	# .. e1 :

	/* We are co-aligned; take care of a partial first word.  */
	ldq_u	t1, 0(a1)	# e0 : load first src word
	and	a0, 7, t0	# .. e1 : take care not to load a word ...
	addq	a1, 8, a1	# e0 :
	beq	t0, stxcpy_aligned # .. e1 : ... if we won't need it
	ldq_u	t0, 0(a0)	# e0 :
	br	stxcpy_aligned	# .. e1 :

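	/* Illustration only: the entry test above is, in C terms,

		if (((unsigned long) dst ^ (unsigned long) src) & 7)
		  goto unaligned;

	   i.e. SRC and DST are co-aligned exactly when they share the same
	   offset within an 8-byte word.  On the co-aligned path the first
	   destination word is loaded only when DST is not word-aligned,
	   since only then are there destination bytes to preserve.  */
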

/* The source and destination are not co-aligned.  Align the destination
   and cope.  We have to be very careful about not reading too much and
   causing a SEGV.  */

	.align 3
$u_head:
	/* We know just enough now to be able to assemble the first
	   full source word.  We can still find a zero at the end of it
	   that prevents us from outputting the whole thing.

	   On entry to this basic block:
	   t0 == the first dest word, for masking back in if needed, else 0
	   t1 == the low bits of the first source word
	   t6 == bytemask that is -1 in dest word bytes */

	ldq_u	t2, 8(a1)	# e0 :
	addq	a1, 8, a1	# .. e1 :

	extql	t1, a1, t1	# e0 :
	extqh	t2, a1, t4	# e0 :
	mskql	t0, a0, t0	# e0 :
	or	t1, t4, t1	# .. e1 :
	mskqh	t1, a0, t1	# e0 :
	or	t0, t1, t1	# e1 :

	or	t1, t6, t6	# e0 :
	cmpbge	zero, t6, t7	# .. e1 :
	lda	t6, -1		# e0 : for masking just below
	bne	t7, $u_final	# .. e1 :

	mskql	t6, a1, t6	# e0 : mask out the bits we have
	or	t6, t2, t2	# e1 :   already extracted before
	cmpbge	zero, t2, t7	# e0 : testing eos
	bne	t7, $u_late_head_exit	# .. e1 (zdb)

	/* Finally, we've got all the stupid leading edge cases taken care
	   of and we can set up to enter the main loop.  */

	stq_u	t1, 0(a0)	# e0 : store first output word
	addq	a0, 8, a0	# .. e1 :
	extql	t2, a1, t0	# e0 : position ho-bits of lo word
	ldq_u	t2, 8(a1)	# .. e1 : read next high-order source word
	addq	a1, 8, a1	# e0 :
	cmpbge	zero, t2, t7	# .. e1 :
	nop			# e0 :
	bne	t7, $u_eos	# .. e1 :

	/* Unaligned copy main loop.  In order to avoid reading too much,
	   the loop is structured to detect zeros in aligned source words.
	   This has, unfortunately, effectively pulled half of a loop
	   iteration out into the head and half into the tail, but it does
	   prevent nastiness from accumulating in the very thing we want
	   to run as fast as possible.

	   On entry to this basic block:
	   t0 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word

	   We further know that t2 does not contain a null terminator.  */

	.align 3
$u_loop:
	extqh	t2, a1, t1	# e0 : extract high bits for current word
	addq	a1, 8, a1	# .. e1 :
	extql	t2, a1, t3	# e0 : extract low bits for next time
	addq	a0, 8, a0	# .. e1 :
	or	t0, t1, t1	# e0 : current dst word now complete
	ldq_u	t2, 0(a1)	# .. e1 : load high word for next time
	stq_u	t1, -8(a0)	# e0 : save the current word
	mov	t3, t0		# .. e1 :
	cmpbge	zero, t2, t7	# e0 : test new word for eos
	beq	t7, $u_loop	# .. e1 :

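	/* Illustration only: with o = a1 & 7 (nonzero on this path, and
	   unchanged as a1 advances by 8), each trip around the loop above
	   behaves roughly like the C below, where prev is the shifted low
	   part carried in t0 and load_aligned, store_aligned and
	   zero_byte_mask are stand-ins for ldq_u, stq_u and cmpbge:

		word = prev | (cur << (8 * (8 - o)));
		prev = cur >> (8 * o);
		store_aligned (dst, word);  dst += 8;
		cur = load_aligned (src += 8);
		if (zero_byte_mask (cur))
		  break;

	   The first statement corresponds to the extqh/or pair and the
	   second to extql, so the steady state is one aligned load, one
	   aligned store and one cmpbge test per output word.  */
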
	/* We've found a zero somewhere in the source word we just read.
	   If it resides in the lower half, we have one (probably partial)
	   word to write out, and if it resides in the upper half, we
	   have one full and one partial word left to write out.

	   On entry to this basic block:
	   t0 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word.  */
$u_eos:
	extqh	t2, a1, t1	# e0 :
	or	t0, t1, t1	# e1 : first (partial) source word complete

	cmpbge	zero, t1, t7	# e0 : is the null in this first word?
	bne	t7, $u_final	# .. e1 (zdb)

$u_late_head_exit:
	stq_u	t1, 0(a0)	# e0 : the null was in the high-order bits
	addq	a0, 8, a0	# .. e1 :
	extql	t2, a1, t1	# e0 :
	cmpbge	zero, t1, t7	# .. e1 :

	/* Take care of a final (probably partial) result word.
	   On entry to this basic block:
	   t1 == assembled source word
	   t7 == cmpbge mask that found the null.  */
$u_final:
	negq	t7, t6		# e0 : isolate low bit set
	and	t6, t7, t8	# e1 :

	and	t8, 0x80, t6	# e0 : avoid dest word load if we can
	bne	t6, 1f		# .. e1 (zdb)

	ldq_u	t0, 0(a0)	# e0 :
	subq	t8, 1, t6	# .. e1 :
	or	t6, t8, t7	# e0 :
	zapnot	t1, t6, t1	# .. e1 : kill source bytes >= null
	zap	t0, t7, t0	# e0 : kill dest bytes <= null
	or	t0, t1, t1	# e1 :

1:	stq_u	t1, 0(a0)	# e0 :
	ret	(t9)		# .. e1 :

	/* Unaligned copy entry point.  */
	.align 3
$unaligned:

	ldq_u	t1, 0(a1)	# e0 : load first source word

	and	a0, 7, t4	# .. e1 : find dest misalignment
	and	a1, 7, t5	# e0 : find src misalignment

	/* Conditionally load the first destination word and a bytemask
	   with 0xff indicating that the destination byte is sacrosanct.  */

	mov	zero, t0	# .. e1 :
	mov	zero, t6	# e0 :
	beq	t4, 1f		# .. e1 :
	ldq_u	t0, 0(a0)	# e0 :
	lda	t6, -1		# .. e1 :
	mskql	t6, a0, t6	# e0 :
1:
	subq	a1, t4, a1	# .. e1 : sub dest misalignment from src addr

	/* If source misalignment is larger than dest misalignment, we need
	   extra startup checks to avoid SEGV.  */

	cmplt	t4, t5, t8	# e0 :
	beq	t8, $u_head	# .. e1 (zdb)

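	/* Illustration only: with d = DST & 7 and s = SRC & 7 (d != s on
	   this path), the subtraction above moved a1 back by d bytes.  When
	   d > s that backs a1 up into the previous aligned word, so the
	   8(a1) load in $u_head still reads the word that contains SRC's
	   first byte and is always safe.  Only when d < s does that load
	   touch the following source word, so we must first check whether
	   the string already ends in the word we have; if it does, reading
	   further could fault on an unmapped page.  */
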
	lda	t2, -1		# e1 : mask out leading garbage in source
	mskqh	t2, t5, t2	# e0 :
	nop			# e0 :
	ornot	t1, t2, t3	# .. e1 :
	cmpbge	zero, t3, t7	# e0 : is there a zero?
	beq	t7, $u_head	# .. e1 (zdb)

	/* At this point we've found a zero in the first partial word of
	   the source.  We need to isolate the valid source data and mask
	   it into the original destination data.  (Incidentally, we know
	   that we'll need at least one byte of that original dest word.) */

	ldq_u	t0, 0(a0)	# e0 :

	negq	t7, t6		# .. e1 : build bitmask of bytes <= zero
	and	t6, t7, t8	# e0 :
	and	a1, 7, t5	# .. e1 :
	subq	t8, 1, t6	# e0 :
	or	t6, t8, t7	# e1 :
	srl	t8, t5, t8	# e0 : adjust final null return value

	zapnot	t2, t7, t2	# .. e1 : prepare source word; mirror changes
	and	t1, t2, t1	# e1 : to source validity mask
	extql	t2, a1, t2	# .. e0 :
	extql	t1, a1, t1	# e0 :

	andnot	t0, t2, t0	# .. e1 : zero place for source to reside
	or	t0, t1, t1	# e1 : and put it there
	stq_u	t1, 0(a0)	# .. e0 :
	ret	(t9)

	cfi_endproc

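/* For reference (illustration only; the function name here is made up):
   the observable effect of __stxcpy is that of the C loop below.  The
   code above achieves it by copying whole aligned words, merging partial
   words at either edge, and never reading an aligned source word that
   lies wholly beyond the terminating null.

	void
	stxcpy_effect (char *dst, const char *src)
	{
	  do
	    *dst++ = *src;
	  while (*src++ != '\0');
	}
*/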
