/* memset with unaligned store and rep stosb
   Copyright (C) 2016-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

/* memset is implemented as:
   1. Use overlapping stores to avoid branches.
   2. If size is less than VEC_SIZE, use integer register stores.
   3. If size is from VEC_SIZE to 2 * VEC_SIZE, use 2 VEC stores.
   4. If size is from 2 * VEC_SIZE to 4 * VEC_SIZE, use 4 VEC stores.
   5. On machines with the ERMS feature, if size is greater than or
      equal to __x86_rep_stosb_threshold, then REP STOSB is used.
   6. If size is more than 4 * VEC_SIZE, align to 4 * VEC_SIZE with
      4 VEC stores and store 4 * VEC_SIZE bytes at a time until done.  */
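/* For example, with VEC_SIZE == 32: a 20-byte memset takes the
   less-than-VEC path (2), a 50-byte memset is done with two overlapping
   32-byte stores (3), a 100-byte memset with four stores (4), and very
   large sizes on ERMS machines normally end up in REP STOSB (5); the
   exact cutoff is __x86_rep_stosb_threshold, which varies by machine.  */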

#include <sysdep.h>

#ifndef MEMSET_CHK_SYMBOL
# define MEMSET_CHK_SYMBOL(p,s)	MEMSET_SYMBOL(p, s)
#endif

#ifndef WMEMSET_CHK_SYMBOL
# define WMEMSET_CHK_SYMBOL(p,s)	WMEMSET_SYMBOL(p, s)
#endif

#ifndef VZEROUPPER
# if VEC_SIZE > 16
#  define VZEROUPPER			vzeroupper
#  define VZEROUPPER_SHORT_RETURN	vzeroupper; ret
# else
#  define VZEROUPPER
# endif
#endif

#ifndef VZEROUPPER_SHORT_RETURN
# define VZEROUPPER_SHORT_RETURN	rep; ret
#endif
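/* "rep; ret" is the traditional 2-byte return idiom, historically used to
   avoid a branch-predictor penalty on some older AMD processors when ret
   is itself a branch target; on current CPUs it behaves like a plain ret.  */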

#ifndef MOVQ
# if VEC_SIZE > 16
#  define MOVQ	vmovq
#  define MOVD	vmovd
# else
#  define MOVQ	movq
#  define MOVD	movd
# endif
#endif

#if VEC_SIZE == 64
# define LOOP_4X_OFFSET	(VEC_SIZE * 4)
#else
# define LOOP_4X_OFFSET	(0)
#endif

#if defined USE_WITH_EVEX || defined USE_WITH_AVX512
# define END_REG	rcx
# define LOOP_REG	rdi
# define LESS_VEC_REG	rax
#else
# define END_REG	rdi
# define LOOP_REG	rdx
# define LESS_VEC_REG	rdi
#endif

#ifdef USE_XMM_LESS_VEC
# define XMM_SMALL	1
#else
# define XMM_SMALL	0
#endif

#ifdef USE_LESS_VEC_MASK_STORE
# define SET_REG64	rcx
# define SET_REG32	ecx
# define SET_REG16	cx
# define SET_REG8	cl
#else
# define SET_REG64	rsi
# define SET_REG32	esi
# define SET_REG16	si
# define SET_REG8	sil
#endif

#define PAGE_SIZE 4096

/* Macro to calculate the size of a small memset block, for alignment
   purposes.  */
#define SMALL_MEMSET_ALIGN(mov_sz, ret_sz)	(2 * (mov_sz) + (ret_sz) + 1)
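/* For example, a block of two 4-byte movs plus a 1-byte ret gives
   SMALL_MEMSET_ALIGN (4, 1) == 10 (the extra + 1 apparently being slack),
   so the ".p2align 4,, SMALL_MEMSET_ALIGN (...)" uses below never insert
   more padding than roughly the size of the small block they align.  */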


#ifndef SECTION
# error SECTION is not defined!
#endif

	.section SECTION(.text), "ax", @progbits
#if IS_IN (libc)
# if defined SHARED
ENTRY_CHK (WMEMSET_CHK_SYMBOL (__wmemset_chk, unaligned))
	cmp	%RDX_LP, %RCX_LP
	jb	HIDDEN_JUMPTARGET (__chk_fail)
END_CHK (WMEMSET_CHK_SYMBOL (__wmemset_chk, unaligned))
# endif

ENTRY (WMEMSET_SYMBOL (__wmemset, unaligned))
	/* Convert the wchar_t count in rdx into a byte count.  */
	shl	$2, %RDX_LP
	WMEMSET_SET_VEC0_AND_SET_RETURN (%esi, %rdi)
	WMEMSET_VDUP_TO_VEC0_LOW()
	cmpq	$VEC_SIZE, %rdx
	jb	L(less_vec_from_wmemset)
	WMEMSET_VDUP_TO_VEC0_HIGH()
	jmp	L(entry_from_wmemset)
END (WMEMSET_SYMBOL (__wmemset, unaligned))
#endif

#if defined SHARED && IS_IN (libc)
ENTRY_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned))
	cmp	%RDX_LP, %RCX_LP
	jb	HIDDEN_JUMPTARGET (__chk_fail)
END_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned))
#endif

ENTRY (MEMSET_SYMBOL (__memset, unaligned))
	MEMSET_SET_VEC0_AND_SET_RETURN (%esi, %rdi)
# ifdef __ILP32__
	/* Clear the upper 32 bits.  */
	mov	%edx, %edx
# endif
	cmpq	$VEC_SIZE, %rdx
	jb	L(less_vec)
	MEMSET_VDUP_TO_VEC0_HIGH()
L(entry_from_wmemset):
	cmpq	$(VEC_SIZE * 2), %rdx
	ja	L(more_2x_vec)
	/* From VEC_SIZE to 2 * VEC_SIZE.  No branch when size == VEC_SIZE.  */
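	/* The two stores overlap for any size in [VEC_SIZE, 2 * VEC_SIZE]:
	   e.g. with VEC_SIZE == 32 and size == 40, they cover
	   [rdi + 8, rdi + 40) and [rdi, rdi + 32), together exactly
	   [rdi, rdi + 40).  */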
	VMOVU	%VMM(0), -VEC_SIZE(%rdi,%rdx)
	VMOVU	%VMM(0), (%rdi)
	VZEROUPPER_RETURN
#if defined USE_MULTIARCH && IS_IN (libc)
END (MEMSET_SYMBOL (__memset, unaligned))

# if defined SHARED && IS_IN (libc)
ENTRY_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned_erms))
	cmp	%RDX_LP, %RCX_LP
	jb	HIDDEN_JUMPTARGET (__chk_fail)
END_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned_erms))
# endif

ENTRY_P2ALIGN (MEMSET_SYMBOL (__memset, unaligned_erms), 6)
	MEMSET_SET_VEC0_AND_SET_RETURN (%esi, %rdi)
# ifdef __ILP32__
	/* Clear the upper 32 bits.  */
	mov	%edx, %edx
# endif
	cmp	$VEC_SIZE, %RDX_LP
	jb	L(less_vec)
	MEMSET_VDUP_TO_VEC0_HIGH ()
	cmp	$(VEC_SIZE * 2), %RDX_LP
	ja	L(stosb_more_2x_vec)
	/* From VEC_SIZE to 2 * VEC_SIZE.  No branch when size == VEC_SIZE.  */
	VMOVU	%VMM(0), (%rdi)
	VMOVU	%VMM(0), (VEC_SIZE * -1)(%rdi, %rdx)
	VZEROUPPER_RETURN
#endif

	.p2align 4,, 4
L(last_2x_vec):
#ifdef USE_LESS_VEC_MASK_STORE
	VMOVU	%VMM(0), (VEC_SIZE * -2)(%rdi, %rdx)
	VMOVU	%VMM(0), (VEC_SIZE * -1)(%rdi, %rdx)
#else
	VMOVU	%VMM(0), (VEC_SIZE * -2)(%rdi)
	VMOVU	%VMM(0), (VEC_SIZE * -1)(%rdi)
#endif
	VZEROUPPER_RETURN

	/* If AVX512 mask instructions are available, put L(less_vec) close
	   to the entry, as it doesn't take much space and is likely a hot
	   target.  */
#ifdef USE_LESS_VEC_MASK_STORE
	.p2align 4,, 10
L(less_vec):
L(less_vec_from_wmemset):
	/* Less than 1 VEC.  */
# if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
#  error Unsupported VEC_SIZE!
# endif
	/* Clear the high bits from edi, keeping only the bits relevant to
	   the page-cross check.  Note that we are using rax, which is set
	   by MEMSET_SET_VEC0_AND_SET_RETURN, as the destination pointer
	   from here on out.  */
	andl	$(PAGE_SIZE - 1), %edi
	/* Check if a VEC_SIZE store would cross a page.  Mask stores
	   suffer serious performance degradation when they have to
	   fault-suppress.  */
	cmpl	$(PAGE_SIZE - VEC_SIZE), %edi
	/* This is generally considered a cold target.  */
	ja	L(cross_page)
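	/* E.g. with VEC_SIZE == 64: if (ptr & 4095) > 4032, a full 64-byte
	   store starting at ptr would extend past the page boundary, so the
	   masked store could have to fault-suppress; take the cold
	   L(cross_page) path instead.  */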
# if VEC_SIZE > 32
	movq	$-1, %rcx
	bzhiq	%rdx, %rcx, %rcx
	kmovq	%rcx, %k1
# else
	movl	$-1, %ecx
	bzhil	%edx, %ecx, %ecx
	kmovd	%ecx, %k1
# endif
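	/* bzhi zeros all bits at positions >= rdx, so k1 ends up with
	   exactly rdx low mask bits set; e.g. rdx == 5 gives 0x1f and the
	   masked store below writes exactly 5 bytes.  */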
	vmovdqu8 %VMM(0), (%rax){%k1}
	VZEROUPPER_RETURN

# if defined USE_MULTIARCH && IS_IN (libc)
	/* Include L(stosb_local) here if including L(less_vec) between
	   L(stosb_more_2x_vec) and ENTRY.  This is to cache-align the
	   L(stosb_more_2x_vec) target.  */
	.p2align 4,, 10
L(stosb_local):
	movzbl	%sil, %eax
	/* rep stosb fills rcx bytes at (rdi) with al; stash the original
	   destination in rdx so it can be returned afterwards.  */
	mov	%RDX_LP, %RCX_LP
	mov	%RDI_LP, %RDX_LP
	rep	stosb
	mov	%RDX_LP, %RAX_LP
	VZEROUPPER_RETURN
# endif
#endif

#if defined USE_MULTIARCH && IS_IN (libc)
	.p2align 4
L(stosb_more_2x_vec):
	cmp	__x86_rep_stosb_threshold(%rip), %RDX_LP
	ja	L(stosb_local)
#endif
	/* Fallthrough goes to the 4x VEC loop (L(loop)).  Tests for memset
	   in the (2x, 4x] and (4x, 8x] VEC_SIZE ranges jump to their
	   respective targets.  */
L(more_2x_vec):
	/* Store first 2x VEC regardless.  */
	VMOVU	%VMM(0), (%rdi)
	VMOVU	%VMM(0), (VEC_SIZE * 1)(%rdi)


	/* Two different methods of setting up pointers / compare.  The two
	   methods are based on the fact that EVEX/AVX512 mov instructions
	   take more bytes than AVX2/SSE2 mov instructions, and that
	   EVEX/AVX512 machines also have fast LEA_BID.  Both methods set up
	   END_REG to avoid a complex address mode.  For EVEX/AVX512 this
	   saves code size and keeps a few targets in one fetch block.  For
	   AVX2/SSE2 this helps prevent AGU bottlenecks.  */
#if !(defined USE_WITH_EVEX || defined USE_WITH_AVX512)
	/* If AVX2/SSE2 compute END_REG (rdi) with ALU.  */
	addq	%rdx, %END_REG
#endif

	cmpq	$(VEC_SIZE * 4), %rdx
	jbe	L(last_2x_vec)


#if defined USE_WITH_EVEX || defined USE_WITH_AVX512
	/* If EVEX/AVX512 compute END_REG - (VEC_SIZE * 4 + LOOP_4X_OFFSET)
	   with LEA_BID.  */

	/* END_REG is rcx for EVEX/AVX512.  */
	leaq	-(VEC_SIZE * 4 + LOOP_4X_OFFSET)(%rdi, %rdx), %END_REG
#endif

	/* Store next 2x vec regardless.  */
	VMOVU	%VMM(0), (VEC_SIZE * 2)(%rax)
	VMOVU	%VMM(0), (VEC_SIZE * 3)(%rax)


#if defined USE_WITH_EVEX || defined USE_WITH_AVX512
	/* If LOOP_4X_OFFSET is nonzero, don't readjust LOOP_REG (rdi);
	   just add the extra offset to the addresses in the loop.  Used
	   for AVX512 to save space, as there is no way to encode
	   (VEC_SIZE * 4) in an imm8.  */
# if LOOP_4X_OFFSET == 0
	subq	$-(VEC_SIZE * 4), %LOOP_REG
# endif
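	/* (With VEC_SIZE == 64 the loop displacements become 256..448, which
	   still encode compactly thanks to EVEX disp8*N compression, while
	   the skipped subq of -256 would have needed a 4-byte immediate --
	   presumably the space saving referred to above.)  */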
	/* Avoid imm32 compare here to save code size.  */
	cmpq	%rdi, %rcx
#else
	addq	$-(VEC_SIZE * 4), %END_REG
	cmpq	$(VEC_SIZE * 8), %rdx
#endif
	jbe	L(last_4x_vec)
#if !(defined USE_WITH_EVEX || defined USE_WITH_AVX512)
	/* Set LOOP_REG (rdx).  */
	leaq	(VEC_SIZE * 4)(%rax), %LOOP_REG
#endif
	/* Align dst for loop.  */
	andq	$(VEC_SIZE * -1), %LOOP_REG
	.p2align 4
L(loop):
	VMOVA	%VMM(0), LOOP_4X_OFFSET(%LOOP_REG)
	VMOVA	%VMM(0), (VEC_SIZE + LOOP_4X_OFFSET)(%LOOP_REG)
	VMOVA	%VMM(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%LOOP_REG)
	VMOVA	%VMM(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%LOOP_REG)
	/* subq of a negative constant rather than addq: for VEC_SIZE == 32,
	   -128 still encodes as an imm8 where +128 would not.  */
	subq	$-(VEC_SIZE * 4), %LOOP_REG
	cmpq	%END_REG, %LOOP_REG
	jb	L(loop)
	.p2align 4,, MOV_SIZE
L(last_4x_vec):
	VMOVU	%VMM(0), LOOP_4X_OFFSET(%END_REG)
	VMOVU	%VMM(0), (VEC_SIZE + LOOP_4X_OFFSET)(%END_REG)
	VMOVU	%VMM(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%END_REG)
	VMOVU	%VMM(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%END_REG)
L(return_vzeroupper):
#if VEC_SIZE > 16
	ZERO_UPPER_VEC_REGISTERS_RETURN
#else
	ret
#endif

	.p2align 4,, 10
#ifndef USE_LESS_VEC_MASK_STORE
# if defined USE_MULTIARCH && IS_IN (libc)
	/* If USE_LESS_VEC_MASK_STORE is not defined, put L(stosb_local)
	   here.  It will be in range for a 2-byte jump encoding.  */
L(stosb_local):
	movzbl	%sil, %eax
	/* Save the destination in rdx so it can be returned after rep
	   stosb advances rdi.  */
	mov	%RDX_LP, %RCX_LP
	mov	%RDI_LP, %RDX_LP
	rep	stosb
	mov	%RDX_LP, %RAX_LP
	VZEROUPPER_RETURN
# endif
	/* Define L(less_vec) only if not otherwise defined.  */
	.p2align 4
L(less_vec):
	/* Broadcast esi to a partial register (i.e. for VEC_SIZE == 32
	   broadcast to xmm).  This only does anything for AVX2.  */
	MEMSET_VDUP_TO_VEC0_LOW ()
L(less_vec_from_wmemset):
#endif
L(cross_page):
#if VEC_SIZE > 32
	cmpl	$32, %edx
	jge	L(between_32_63)
#endif
#if VEC_SIZE > 16
	cmpl	$16, %edx
	jge	L(between_16_31)
#endif
#ifndef USE_XMM_LESS_VEC
	/* Extract the 8-byte fill pattern into SET_REG64 for the small
	   integer stores below.  */
	MOVQ	%VMM_128(0), %SET_REG64
#endif
	cmpl	$8, %edx
	jge	L(between_8_15)
	cmpl	$4, %edx
	jge	L(between_4_7)
	/* Three-way split on the remaining sizes: > 1 goes to
	   L(between_2_3), 0 returns immediately, and size == 1 falls
	   through to the single byte store.  */
	cmpl	$1, %edx
	jg	L(between_2_3)
	jl	L(between_0_0)
	movb	%SET_REG8, (%LESS_VEC_REG)
L(between_0_0):
	ret

	/* Align small targets only if not doing so would cross a fetch
	   line.  */
#if VEC_SIZE > 32
	.p2align 4,, SMALL_MEMSET_ALIGN(MOV_SIZE, RET_SIZE)
	/* From 32 to 63.  No branch when size == 32.  */
L(between_32_63):
	VMOVU	%VMM_256(0), (%LESS_VEC_REG)
	VMOVU	%VMM_256(0), -32(%LESS_VEC_REG, %rdx)
	VZEROUPPER_RETURN
#endif

#if VEC_SIZE >= 32
	.p2align 4,, SMALL_MEMSET_ALIGN(MOV_SIZE, 1)
L(between_16_31):
	/* From 16 to 31.  No branch when size == 16.  */
	VMOVU	%VMM_128(0), (%LESS_VEC_REG)
	VMOVU	%VMM_128(0), -16(%LESS_VEC_REG, %rdx)
	ret
#endif

	/* Move size is 3 for SSE2, EVEX, and AVX512.  Move size is 4 for
	   AVX2.  */
	.p2align 4,, SMALL_MEMSET_ALIGN(3 + XMM_SMALL, 1)
L(between_8_15):
	/* From 8 to 15.  No branch when size == 8.  */
#ifdef USE_XMM_LESS_VEC
	MOVQ	%VMM_128(0), (%rdi)
	MOVQ	%VMM_128(0), -8(%rdi, %rdx)
#else
	movq	%SET_REG64, (%LESS_VEC_REG)
	movq	%SET_REG64, -8(%LESS_VEC_REG, %rdx)
#endif
	ret

	/* Move size is 2 for SSE2, EVEX, and AVX512.  Move size is 4 for
	   AVX2.  */
	.p2align 4,, SMALL_MEMSET_ALIGN(2 << XMM_SMALL, 1)
L(between_4_7):
	/* From 4 to 7.  No branch when size == 4.  */
#ifdef USE_XMM_LESS_VEC
	MOVD	%VMM_128(0), (%rdi)
	MOVD	%VMM_128(0), -4(%rdi, %rdx)
#else
	movl	%SET_REG32, (%LESS_VEC_REG)
	movl	%SET_REG32, -4(%LESS_VEC_REG, %rdx)
#endif
	ret

	/* 4 * XMM_SMALL for the third mov for AVX2.  */
	.p2align 4,, 4 * XMM_SMALL + SMALL_MEMSET_ALIGN(3, 1)
L(between_2_3):
	/* From 2 to 3.  No branch when size == 2.  */
#ifdef USE_XMM_LESS_VEC
	movb	%SET_REG8, (%rdi)
	movb	%SET_REG8, 1(%rdi)
	movb	%SET_REG8, -1(%rdi, %rdx)
#else
	movw	%SET_REG16, (%LESS_VEC_REG)
	movb	%SET_REG8, -1(%LESS_VEC_REG, %rdx)
#endif
	ret
END (MEMSET_SYMBOL (__memset, unaligned_erms))