/*
 * strlen - calculate the length of a string
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

/* Assumptions:
 *
 * ARMv8-a, AArch64, unaligned accesses, min page size 4k.
 */

#include "../asmdefs.h"

/* To test the page crossing code path more thoroughly, compile with
   -DTEST_PAGE_CROSS - this will force all calls through the slower
   entry path.  This option is not intended for production use.  */

/* Arguments and results.  */
#define srcin x0
#define len x0

/* Locals and temporaries.  */
#define src x1
#define data1 x2
#define data2 x3
#define has_nul1 x4
#define has_nul2 x5
#define tmp1 x4
#define tmp2 x5
#define tmp3 x6
#define tmp4 x7
#define zeroones x8

	/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
	   (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
	   can be done in parallel across the entire word.  A faster check
	   (X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
	   false hits for characters 129..255.  */

#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080

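/* Illustrative C equivalent of the full check (not part of the build;
   has_zero_byte is a hypothetical name used only for this sketch):

	static inline uint64_t
	has_zero_byte (uint64_t x)
	{
	  return (x - REP8_01) & ~(x | REP8_7f);
	}

   Per byte: 0x00 gives 0xff & 0x80 = 0x80; 0x01 gives 0x00 & 0x80 = 0;
   0x80 gives 0x7f & 0x00 = 0.  Only a zero byte sets its 0x80 bit.
   Borrows from a zero byte can corrupt higher bytes of the word, but
   those lie beyond the first NUL and are never inspected.  */
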
#ifdef TEST_PAGE_CROSS
# define MIN_PAGE_SIZE 15
#else
# define MIN_PAGE_SIZE 4096
#endif
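
/* With MIN_PAGE_SIZE of 15, the entry check below computes srcin & 14
   and compares it against 15 - 16 = -1; any non-negative value exceeds
   that, so every call branches to L(page_cross).  */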

	/* Since strings are short on average, we check the first 16 bytes
	   of the string for a NUL character.  In order to do an unaligned ldp
	   safely we have to do a page cross check first.  If there is a NUL
	   byte we calculate the length from the 2 8-byte words using
	   conditional select to reduce branch mispredictions (it is unlikely
	   __strlen_aarch64 will be repeatedly called on strings with the
	   same length).

	   If the string is longer than 16 bytes, we align src so we don't
	   need further page cross checks, and process 32 bytes per iteration
	   using the fast NUL check.  If we encounter non-ASCII characters,
	   fall back to a second loop using the full NUL check.

	   If the page cross check fails, we read 16 bytes from an aligned
	   address, remove any characters before the string, and continue
	   in the main loop using aligned loads.  Since strings crossing a
	   page in the first 16 bytes are rare (probability of
	   16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.

	   AArch64 systems have a minimum page size of 4k.  We don't bother
	   checking for larger page sizes - the cost of setting up the correct
	   page size is just not worth the extra gain from a small reduction in
	   the cases taking the slow path.  Note that we only care about
	   whether the first fetch, which may be misaligned, crosses a page
	   boundary.  */

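/* Reference sketch of the short-string path in C (illustrative only,
   assuming little-endian; strlen_head, load64 and main_loop are
   hypothetical names, and the page-cross check is omitted):

	static inline uint64_t
	load64 (const char *p)
	{
	  uint64_t v;
	  __builtin_memcpy (&v, p, sizeof (v));
	  return v;
	}

	static size_t
	strlen_head (const char *s)
	{
	  uint64_t data1 = load64 (s);		// bytes 0..7
	  uint64_t data2 = load64 (s + 8);	// bytes 8..15
	  uint64_t nul1 = (data1 - REP8_01) & ~(data1 | REP8_7f);
	  uint64_t nul2 = (data2 - REP8_01) & ~(data2 | REP8_7f);
	  if (nul1 == 0 && nul2 == 0)
	    return main_loop (s);		// NUL is beyond byte 15
	  uint64_t syndrome = nul1 ? nul1 : nul2;
	  size_t base = nul1 ? 0 : 8;
	  return base + (__builtin_ctzll (syndrome) >> 3);
	}

   The assembly below computes the same byte index with rev + clz
   instead of a count-trailing-zeros, and uses csel instead of
   branches.  */
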
ENTRY (__strlen_aarch64)
	and	tmp1, srcin, MIN_PAGE_SIZE - 1
	mov	zeroones, REP8_01
	cmp	tmp1, MIN_PAGE_SIZE - 16
	b.gt	L(page_cross)
	ldp	data1, data2, [srcin]
#ifdef __AARCH64EB__
	/* For big-endian, carry propagation (if the final byte in the
	   string is 0x01) means we cannot use has_nul1/2 directly.
	   Since we expect strings to be small and early-exit,
	   byte-swap the data now so has_nul1/2 will be correct.  */
	rev	data1, data1
	rev	data2, data2
#endif
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	beq	L(main_loop_entry)

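	/* Worked example (little-endian, illustrative): for the string
	   "ab" with the remaining bytes of the word zero, data1 is
	   0x0000000000006261 and has_nul1 is 0x8080808080800000.
	   rev gives 0x0000808080808080, clz gives 16, and 16 >> 3 = 2,
	   the index of the NUL byte and hence the length.  */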
	/* Enter with C = has_nul1 == 0.  */
	csel	has_nul1, has_nul1, has_nul2, cc
	mov	len, 8
	rev	has_nul1, has_nul1
	clz	tmp1, has_nul1
	csel	len, xzr, len, cc
	add	len, len, tmp1, lsr 3
	ret

	/* The inner loop processes 32 bytes per iteration and uses the fast
	   NUL check.  If we encounter non-ASCII characters, use a second
	   loop with the accurate NUL check.  */
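	/* The fast check in C (illustrative only):

		((data1 - REP8_01) | (data2 - REP8_01)) & REP8_80

	   Non-zero means one of the 16 bytes may be NUL (with false hits
	   for bytes in 129..255), and the accurate check below decides.
	   Note that zeroones lsl 7 in the tst is exactly REP8_80.  */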
	.p2align 4
L(main_loop_entry):
	bic	src, srcin, 15
	sub	src, src, 16
L(main_loop):
	ldp	data1, data2, [src, 32]!
L(page_cross_entry):
	sub	tmp1, data1, zeroones
	sub	tmp3, data2, zeroones
	orr	tmp2, tmp1, tmp3
	tst	tmp2, zeroones, lsl 7
	bne	1f
	ldp	data1, data2, [src, 16]
	sub	tmp1, data1, zeroones
	sub	tmp3, data2, zeroones
	orr	tmp2, tmp1, tmp3
	tst	tmp2, zeroones, lsl 7
	beq	L(main_loop)
	add	src, src, 16
1:
	/* The fast check failed, so do the slower, accurate NUL check.  */
	orr	tmp2, data1, REP8_7f
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	beq	L(nonascii_loop)

	/* Enter with C = has_nul1 == 0.  */
L(tail):
#ifdef __AARCH64EB__
	/* For big-endian, carry propagation (if the final byte in the
	   string is 0x01) means we cannot use has_nul1/2 directly.  The
	   easiest way to get the correct byte is to byte-swap the data
	   and calculate the syndrome a second time.  */
	csel	data1, data1, data2, cc
	rev	data1, data1
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	bic	has_nul1, tmp1, tmp2
#else
	csel	has_nul1, has_nul1, has_nul2, cc
#endif
	sub	len, src, srcin
	rev	has_nul1, has_nul1
	add	tmp2, len, 8
	clz	tmp1, has_nul1
	csel	len, len, tmp2, cc
	add	len, len, tmp1, lsr 3
	ret

L(nonascii_loop):
	ldp	data1, data2, [src, 16]!
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	bne	L(tail)
	ldp	data1, data2, [src, 16]!
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	beq	L(nonascii_loop)
	b	L(tail)

	/* Load 16 bytes from [srcin & ~15] and force the bytes that precede
	   srcin to 0x7f, so we ignore any NUL bytes before the string.
	   Then continue in the aligned loop.  */
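	/* Worked example (little-endian, illustrative): if srcin & 15 == 5,
	   tmp1 below holds 40 in its low six bits, -1 lsl 40 is
	   0xffffff0000000000, and orr with REP8_80 gives
	   0xffffff8080808080.  orn then ORs data1 with 0x0000007f7f7f7f7f,
	   so the five bytes before the string can never match as NUL,
	   while data2 is kept intact because bit 3 of srcin is clear.  */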
L(page_cross):
	bic	src, srcin, 15
	ldp	data1, data2, [src]
	lsl	tmp1, srcin, 3
	mov	tmp4, -1
#ifdef __AARCH64EB__
	/* Big-endian.  Early bytes are at MSB.  */
	lsr	tmp1, tmp4, tmp1	/* Shift (tmp1 & 63).  */
#else
	/* Little-endian.  Early bytes are at LSB.  */
	lsl	tmp1, tmp4, tmp1	/* Shift (tmp1 & 63).  */
#endif
	orr	tmp1, tmp1, REP8_80
	orn	data1, data1, tmp1
	orn	tmp2, data2, tmp1
	tst	srcin, 8
	csel	data1, data1, tmp4, eq
	csel	data2, data2, tmp2, eq
	b	L(page_cross_entry)

END (__strlen_aarch64)