/* Copyright (C) 2000-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

/* Copy no more than COUNT bytes of the null-terminated string from
   SRC to DST.

   This is an internal routine used by strncpy, stpncpy, and strncat.
   As such, it uses special linkage conventions to make implementation
   of these public functions more efficient.

   On input:
	t9  = return address
	a0  = DST
	a1  = SRC
	a2  = COUNT

   Furthermore, COUNT may not be zero.

   On output:
	t0  = last word written
	t8  = bitmask (with one bit set) indicating the last byte written
	t10 = bitmask (with one bit set) indicating the byte position of
	      the end of the range specified by COUNT
	a0  = unaligned address of the last *word* written
	a2  = the number of full words left in COUNT

   Furthermore, v0, a3-a5, t11, and t12 are untouched.
*/
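/* For orientation only: ignoring the word-at-a-time implementation and
   the register-based result protocol described above, the byte-level
   effect is roughly the following C sketch (an editorial illustration
   with a hypothetical name, not part of glibc).  Bytes beyond the
   copied null, or beyond COUNT, keep their original values; the last
   word is simply re-stored with those bytes masked back in.

	void
	sketch_stxncpy (char *dst, const char *src, unsigned long count)
	{
	  unsigned long i;
	  for (i = 0; i < count; i++)
	    {
	      dst[i] = src[i];
	      if (src[i] == '\0')
		break;
	    }
	}

   The null padding of strncpy and the return-value conventions of
   stpncpy and strncat are built by the callers on top of this routine.  */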

#include <sysdep.h>

	.arch ev6
	.set noat
	.set noreorder

	.text
	.type	__stxncpy, @function
	.globl	__stxncpy
	.usepv	__stxncpy, no

	cfi_startproc
	cfi_return_column (t9)

	/* On entry to this basic block:
	   t0 == the first destination word for masking back in
	   t1 == the first source word.  */
	.align 4
stxncpy_aligned:
	/* Create the 1st output word and detect 0's in the 1st input word.  */
	lda	t2, -1		# E : build a mask against false zero
	mskqh	t2, a1, t2	# U : detection in the src word (stall)
	mskqh	t1, a1, t3	# U :
	ornot	t1, t2, t2	# E : (stall)

	mskql	t0, a1, t0	# U : assemble the first output word
	cmpbge	zero, t2, t7	# E : bits set iff null found
	or	t0, t3, t0	# E : (stall)
	beq	a2, $a_eoc	# U :

	bne	t7, $a_eos	# U :
	nop
	nop
	nop
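	/* Editorial note on the idiom above: "cmpbge zero, tN, t7" sets
	   bit i of t7 iff byte i of tN is zero (0 >= byte, compared
	   unsigned).  For example, tN = 0x4141410041414141 gives
	   t7 = 0x10: byte 4, counting from the least significant byte,
	   is the null terminator.  The same test drives the copy loop
	   below.  */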

	/* On entry to this basic block:
	   t0 == a source word not containing a null.  */

	/*
	 * nops here to:
	 *	separate store quads from load quads
	 *	limit of 1 bcond/quad to permit training
	 */
$a_loop:
	stq_u	t0, 0(a0)	# L :
	addq	a0, 8, a0	# E :
	subq	a2, 1, a2	# E :
	nop

	ldq_u	t0, 0(a1)	# L :
	addq	a1, 8, a1	# E :
	cmpbge	zero, t0, t7	# E :
	beq	a2, $a_eoc	# U :

	beq	t7, $a_loop	# U :
	nop
	nop
	nop

	/* Take care of the final (partial) word store.  At this point
	   the end-of-count bit is set in t7 iff it applies.

	   On entry to this basic block we have:
	   t0 == the source word containing the null
	   t7 == the cmpbge mask that found it.  */
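	/* Editorial note: the negq/and pair below computes t7 & -t7,
	   i.e. it isolates the lowest set bit of the cmpbge mask; for
	   example t7 = 0x28 yields t8 = 0x08, marking the first byte
	   that terminates the copy.  The subsequent "and t8, 0x80" asks
	   whether that byte is byte 7: if so, the entire destination
	   word comes from the source and there is no need to load the
	   old destination word first.  */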
$a_eos:
	negq	t7, t8		# E : find low bit set
	and	t7, t8, t8	# E : (stall)
	/* For the sake of the cache, don't read a destination word
	   if we're not going to need it.  */
	and	t8, 0x80, t6	# E : (stall)
	bne	t6, 1f		# U : (stall)

	/* We're doing a partial word store and so need to combine
	   our source and original destination words.  */
	ldq_u	t1, 0(a0)	# L :
	subq	t8, 1, t6	# E :
	or	t8, t6, t7	# E : (stall)
	zapnot	t0, t7, t0	# U : clear src bytes > null (stall)
	zap	t1, t7, t1	# U : clear dst bytes <= null
	or	t0, t1, t0	# E : (stall)
	nop
	nop

1:	stq_u	t0, 0(a0)	# L :
	ret	(t9)		# L0 : Latency=3
	nop
	nop

	/* Add the end-of-count bit to the eos detection bitmask.  */
$a_eoc:
	or	t10, t7, t7	# E :
	br	$a_eos		# L0 : Latency=3
	nop
	nop
	.align 4
__stxncpy:
	/* Are source and destination co-aligned?  */
	lda	t2, -1		# E :
	xor	a0, a1, t1	# E :
	and	a0, 7, t0	# E : find dest misalignment
	nop			# E :

	srl	t2, 1, t2	# U :
	and	t1, 7, t1	# E :
	cmovlt	a2, t2, a2	# E : bound count to LONG_MAX (stall)
	nop			# E :

	addq	a2, t0, a2	# E : bias count by dest misalignment
	subq	a2, 1, a2	# E : (stall)
	and	a2, 7, t2	# E : (stall)
	lda	t10, 1		# E :

	srl	a2, 3, a2	# U : a2 = loop counter = (count - 1)/8
	sll	t10, t2, t10	# U : t10 = bitmask of last count byte
	nop			# E :
	bne	t1, $unaligned	# U : (stall)
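	/* Worked example (editorial): for a0 % 8 == 3 and COUNT == 10,
	   the code above computes a2 = (10 + 3 - 1) / 8 = 1 full word
	   remaining after the first (partial) one, and
	   t10 = 1 << ((10 + 3 - 1) % 8) = 0x10, the byte position at
	   which COUNT expires within its last destination word.  */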

	/* We are co-aligned; take care of a partial first word.  */
	ldq_u	t1, 0(a1)	# L : load first src word
	addq	a1, 8, a1	# E :
	beq	t0, stxncpy_aligned	# U : avoid loading dest word if not needed
	ldq_u	t0, 0(a0)	# L :

	br	stxncpy_aligned	# U :
	nop
	nop
	nop


/* The source and destination are not co-aligned.  Align the destination
   and cope.  We have to be very careful about not reading too much and
   causing a SEGV.  */

	.align 4
$u_head:
	/* We know just enough now to be able to assemble the first
	   full source word.  We can still find a zero at the end of it
	   that prevents us from outputting the whole thing.

	   On entry to this basic block:
	   t0 == the first dest word, unmasked
	   t1 == the shifted low bits of the first source word
	   t6 == bytemask that is -1 in dest word bytes */

	ldq_u	t2, 8(a1)	# L : Latency=3 load second src word
	addq	a1, 8, a1	# E :
	mskql	t0, a0, t0	# U : mask trailing garbage in dst
	extqh	t2, a1, t4	# U : (3 cycle stall on t2)

	or	t1, t4, t1	# E : first aligned src word complete (stall)
	mskqh	t1, a0, t1	# U : mask leading garbage in src (stall)
	or	t0, t1, t0	# E : first output word complete (stall)
	or	t0, t6, t6	# E : mask original data for zero test (stall)

	cmpbge	zero, t6, t7	# E :
	beq	a2, $u_eocfin	# U :
	lda	t6, -1		# E :
	nop

	bne	t7, $u_final	# U :
	mskql	t6, a1, t6	# U : mask out bits already seen
	stq_u	t0, 0(a0)	# L : store first output word
	or	t6, t2, t2	# E :

	cmpbge	zero, t2, t7	# E : find nulls in second partial
	addq	a0, 8, a0	# E :
	subq	a2, 1, a2	# E :
	bne	t7, $u_late_head_exit	# U :

	/* Finally, we've got all the stupid leading edge cases taken care
	   of and we can set up to enter the main loop.  */
	extql	t2, a1, t1	# U : position hi-bits of lo word
	beq	a2, $u_eoc	# U :
	ldq_u	t2, 8(a1)	# L : read next high-order source word
	addq	a1, 8, a1	# E :

	extqh	t2, a1, t0	# U : position lo-bits of hi word (stall)
	cmpbge	zero, t2, t7	# E :
	nop
	bne	t7, $u_eos	# U :

	/* Unaligned copy main loop.  In order to avoid reading too much,
	   the loop is structured to detect zeros in aligned source words.
	   This has, unfortunately, effectively pulled half of a loop
	   iteration out into the head and half into the tail, but it does
	   prevent nastiness from accumulating in the very thing we want
	   to run as fast as possible.

	   On entry to this basic block:
	   t0 == the shifted low-order bits from the current source word
	   t1 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word

	   We further know that t2 does not contain a null terminator.  */
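	/* Editorial note: the loop below uses the classic Alpha unaligned
	   fetch idiom.  With the relative misalignment (the low three
	   bits of the biased a1) equal to, say, 3, "extql t2, a1" yields
	   t2 >> 24 and "extqh t2, a1" yields t2 << 40, so oring the two
	   halves taken from consecutive source words rebuilds one aligned
	   destination word per iteration.  */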

	.align 4
$u_loop:
	or	t0, t1, t0	# E : current dst word now complete
	subq	a2, 1, a2	# E : decrement word count
	extql	t2, a1, t1	# U : extract high bits for next time
	addq	a0, 8, a0	# E :

	stq_u	t0, -8(a0)	# L : save the current word
	beq	a2, $u_eoc	# U :
	ldq_u	t2, 8(a1)	# L : Latency=3 load high word for next time
	addq	a1, 8, a1	# E :

	extqh	t2, a1, t0	# U : extract low bits (2 cycle stall)
	cmpbge	zero, t2, t7	# E : test new word for eos
	nop
	beq	t7, $u_loop	# U :

	/* We've found a zero somewhere in the source word we just read.
	   If it resides in the lower half, we have one (probably partial)
	   word to write out, and if it resides in the upper half, we
	   have one full and one partial word left to write out.

	   On entry to this basic block:
	   t0 == the shifted low-order bits from the current source word
	   t1 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word.  */
$u_eos:
	or	t0, t1, t0	# E : first (partial) source word complete
	nop
	cmpbge	zero, t0, t7	# E : is the null in this first word? (stall)
	bne	t7, $u_final	# U : (stall)

	stq_u	t0, 0(a0)	# L : the null was in the high-order bits
	addq	a0, 8, a0	# E :
	subq	a2, 1, a2	# E :
	nop

$u_late_head_exit:
	extql	t2, a1, t0	# U :
	cmpbge	zero, t0, t7	# E :
	or	t7, t10, t6	# E : (stall)
	cmoveq	a2, t6, t7	# E : Latency=2, extra map slot (stall)

	/* Take care of a final (probably partial) result word.
	   On entry to this basic block:
	   t0 == assembled source word
	   t7 == cmpbge mask that found the null.  */
$u_final:
	negq	t7, t6		# E : isolate low bit set
	and	t6, t7, t8	# E : (stall)
	and	t8, 0x80, t6	# E : avoid dest word load if we can (stall)
	bne	t6, 1f		# U : (stall)

	ldq_u	t1, 0(a0)	# L :
	subq	t8, 1, t6	# E :
	or	t6, t8, t7	# E : (stall)
	zapnot	t0, t7, t0	# U : kill source bytes > null

	zap	t1, t7, t1	# U : kill dest bytes <= null
	or	t0, t1, t0	# E : (stall)
	nop
	nop

1:	stq_u	t0, 0(a0)	# L :
	ret	(t9)		# L0 : Latency=3

	/* Got to end-of-count before end of string.
	   On entry to this basic block:
	   t1 == the shifted high-order bits from the previous source word.  */
$u_eoc:
	and	a1, 7, t6	# E :
	sll	t10, t6, t6	# U : (stall)
	and	t6, 0xff, t6	# E : (stall)
	bne	t6, 1f		# U : (stall)

	ldq_u	t2, 8(a1)	# L : load final src word
	nop
	extqh	t2, a1, t0	# U : extract low bits for last word (stall)
	or	t1, t0, t1	# E : (stall)

1:	cmpbge	zero, t1, t7	# E :
	mov	t1, t0

$u_eocfin:			# end-of-count, final word
	or	t10, t7, t7	# E :
	br	$u_final	# L0 : Latency=3

	/* Unaligned copy entry point.  */
	.align 4
$unaligned:

	ldq_u	t1, 0(a1)	# L : load first source word
	and	a0, 7, t4	# E : find dest misalignment
	and	a1, 7, t5	# E : find src misalignment
	/* Conditionally load the first destination word and a bytemask
	   with 0xff indicating that the destination byte is sacrosanct.  */
	mov	zero, t0	# E :

	mov	zero, t6	# E :
	beq	t4, 1f		# U :
	ldq_u	t0, 0(a0)	# L :
	lda	t6, -1		# E :

	mskql	t6, a0, t6	# U :
	nop
	nop
1:	subq	a1, t4, a1	# E : sub dest misalignment from src addr

	/* If source misalignment is larger than dest misalignment, we need
	   extra startup checks to avoid SEGV.  */
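	/* Editorial gloss: in that case the quads below test whether a
	   null terminator (or, when a2 is already zero, the end of
	   COUNT) falls within the valid bytes of the first source word.
	   If not, the string is known to extend into the next source
	   word and $u_head may safely read it; otherwise the copy is
	   completed from this first word without touching the next.  */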

	cmplt	t4, t5, t8	# E :
	extql	t1, a1, t1	# U : shift src into place
	lda	t2, -1		# E : for creating masks later
	beq	t8, $u_head	# U : (stall)

	mskqh	t2, t5, t2	# U : begin src byte validity mask
	cmpbge	zero, t1, t7	# E : is there a zero?
	extql	t2, a1, t2	# U :
	or	t7, t10, t5	# E : test for end-of-count too

	cmpbge	zero, t2, t3	# E :
	cmoveq	a2, t5, t7	# E : Latency=2, extra map slot
	nop			# E : keep with cmoveq
	andnot	t7, t3, t7	# E : (stall)

	beq	t7, $u_head	# U :
	/* At this point we've found a zero in the first partial word of
	   the source.  We need to isolate the valid source data and mask
	   it into the original destination data.  (Incidentally, we know
	   that we'll need at least one byte of that original dest word.)  */
	ldq_u	t0, 0(a0)	# L :
	negq	t7, t6		# E : build bitmask of bytes <= zero
	mskqh	t1, t4, t1	# U :

	and	t6, t7, t8	# E :
	subq	t8, 1, t6	# E : (stall)
	or	t6, t8, t7	# E : (stall)
	zapnot	t2, t7, t2	# U : prepare source word; mirror changes (stall)

	zapnot	t1, t7, t1	# U : to source validity mask
	andnot	t0, t2, t0	# E : zero place for source to reside
	or	t0, t1, t0	# E : and put it there (stall both t0, t1)
	stq_u	t0, 0(a0)	# L : (stall)

	ret	(t9)		# L0 : Latency=3

	cfi_endproc
