/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Regents of the University of California
 */


#include <linux/linkage.h>
#include <asm/asm.h>

/* void *memset(void *, int, size_t) */
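/*
 * Strategy: lengths below 16 bytes fall through to the trailing byte
 * loop. Otherwise, store leading bytes up to an XLEN-aligned address,
 * broadcast the fill byte across one register, bulk-fill with an
 * unrolled block of 32 XLEN-wide stores (entered Duff's-device style
 * for the partial first pass), then finish the sub-XLEN tail bytewise.
 */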
SYM_FUNC_START(__memset)
	move t0, a0 /* Preserve return value */

	/* Defer to byte-oriented fill for small sizes */
	sltiu a3, a2, 16
	bnez a3, 4f
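	/*
	 * Note: 16 is a tuning choice; below roughly this length the
	 * alignment and broadcast setup would likely cost more than
	 * the bytewise loop saves.
	 */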

	/*
	 * Round to nearest XLEN-aligned address
	 * greater than or equal to start address
	 */
	addi a3, t0, SZREG-1
	andi a3, a3, ~(SZREG-1)
	beq a3, t0, 2f /* Skip if already aligned */
	/* Handle initial misalignment */
	sub a4, a3, t0
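	/* a4 = number of leading bytes needed to reach alignment */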
1:
	sb a1, 0(t0)
	addi t0, t0, 1
	bltu t0, a3, 1b
	sub a2, a2, a4 /* Update count */

2: /* Duff's device with 32 XLEN stores per iteration */
	/* Broadcast value into all bytes */
	andi a1, a1, 0xff
	slli a3, a1, 8
	or a1, a3, a1
	slli a3, a1, 16
	or a1, a3, a1
#ifdef CONFIG_64BIT
	slli a3, a1, 32
	or a1, a3, a1
#endif
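	/* e.g. a1 = 0xAB becomes 0xABABABAB (RV32) or 0xABABABABABABABAB (RV64) */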

	/* Calculate end address */
	andi a4, a2, ~(SZREG-1)
	add a3, t0, a4
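	/* a3 = one past the last full XLEN store; sub-XLEN leftovers stay in a2 for the byte loop at 4 */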

	andi a4, a4, 31*SZREG /* Calculate remainder */
	beqz a4, 3f /* Shortcut if no remainder */
	neg a4, a4
	addi a4, a4, 32*SZREG /* Calculate initial offset */
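	/*
	 * a4 = 32*SZREG - remainder: the data bytes covered by the
	 * stores that the partial first pass must skip.
	 */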

	/* Adjust start address with offset */
	sub t0, t0, a4
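	/*
	 * Biasing t0 backwards makes the executed tail of the store
	 * block write exactly the remainder bytes, and lets the loop
	 * advance t0 by a fixed 32*SZREG regardless of entry point.
	 */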

	/* Jump into loop body */
	/* Assumes 32-bit instruction lengths */
	la a5, 3f
#ifdef CONFIG_64BIT
	srli a4, a4, 1
#endif
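	/*
	 * Convert skipped data bytes into an instruction-byte offset:
	 * each REG_S below must assemble to one 4-byte instruction, so
	 * the offset is a4*4/SZREG, i.e. a4 on RV32 and a4/2 on RV64.
	 */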
	add a5, a5, a4
	jr a5
3:
	REG_S a1, 0(t0)
	REG_S a1, SZREG(t0)
	REG_S a1, 2*SZREG(t0)
	REG_S a1, 3*SZREG(t0)
	REG_S a1, 4*SZREG(t0)
	REG_S a1, 5*SZREG(t0)
	REG_S a1, 6*SZREG(t0)
	REG_S a1, 7*SZREG(t0)
	REG_S a1, 8*SZREG(t0)
	REG_S a1, 9*SZREG(t0)
	REG_S a1, 10*SZREG(t0)
	REG_S a1, 11*SZREG(t0)
	REG_S a1, 12*SZREG(t0)
	REG_S a1, 13*SZREG(t0)
	REG_S a1, 14*SZREG(t0)
	REG_S a1, 15*SZREG(t0)
	REG_S a1, 16*SZREG(t0)
	REG_S a1, 17*SZREG(t0)
	REG_S a1, 18*SZREG(t0)
	REG_S a1, 19*SZREG(t0)
	REG_S a1, 20*SZREG(t0)
	REG_S a1, 21*SZREG(t0)
	REG_S a1, 22*SZREG(t0)
	REG_S a1, 23*SZREG(t0)
	REG_S a1, 24*SZREG(t0)
	REG_S a1, 25*SZREG(t0)
	REG_S a1, 26*SZREG(t0)
	REG_S a1, 27*SZREG(t0)
	REG_S a1, 28*SZREG(t0)
	REG_S a1, 29*SZREG(t0)
	REG_S a1, 30*SZREG(t0)
	REG_S a1, 31*SZREG(t0)
	addi t0, t0, 32*SZREG
	bltu t0, a3, 3b
	andi a2, a2, SZREG-1 /* Update count */
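	/* a2 now holds only the sub-XLEN tail bytes */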

4:
	/* Handle trailing misalignment */
	beqz a2, 6f
	add a3, t0, a2
5:
	sb a1, 0(t0)
	addi t0, t0, 1
	bltu t0, a3, 5b
6:
	ret
SYM_FUNC_END(__memset)
SYM_FUNC_ALIAS_WEAK(memset, __memset)