/* SPDX-License-Identifier: GPL-2.0 */
/* linux/arch/sparc/lib/memset.S: Sparc optimized memset, bzero and clear_user code
 * Copyright (C) 1991,1996 Free Software Foundation
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 *
 * Calls to memset return the initial %o0.  Calls to bzero return 0 on
 * success, or the number of bytes not yet set if an exception occurs and
 * we were called as clear_user.
 */

#include <linux/export.h>
#include <asm/ptrace.h>

/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr

/* EX(x, y, a, b): emit the store "x, y" with an exception-table entry.
 * If the store faults, the .fixup stub executes "retl; a, b, %o0", i.e.
 * it computes the number of bytes still unset into %o0 (in the delay
 * slot of the return) and returns straight to the caller.
 */
#define EX(x,y,a,b) 				\
98: 	x,y;					\
	.section .fixup,ALLOC,EXECINSTR;	\
	.align	4;				\
99:	retl;					\
	 a, b, %o0;				\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	98b, 99b;			\
	.text;					\
	.align	4

/* STORE(source, base, offset, n): 8-byte std used inside the 128-byte
 * clearing loop, with fault fixup.  On a fault the stub adjusts %o3
 * (bytes left in the current 128-byte block) by the position of the
 * failing store and branches to local label 30 in the .fixup section,
 * which folds in the sub-128 remainder and returns the total in %o0.
 */
#define STORE(source, base, offset, n)		\
98: 	std source, [base + offset + n];	\
	.section .fixup,ALLOC,EXECINSTR;	\
	.align	4;				\
99:	ba 30f;					\
	 sub %o3, n - offset, %o3;		\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	98b, 99b;			\
	.text;					\
	.align	4;

/* STORE_LAST(source, base, offset, n): std with negative addressing used
 * by the backward tail ladder.  On a fault, the EX stub returns
 * %o1 + offset + n in %o0: the final 0-7 byte tail plus everything at
 * and beyond the failing store that was not cleared.
 */
#define STORE_LAST(source, base, offset, n)	\
	EX(std source, [base - offset - n],	\
	   add %o1, offset + n);

/* Please don't change these macros, unless you change the logic
 * in the .fixup section below as well.
 * Store 64 bytes at (BASE + OFFSET) using value SOURCE.
 * Eight doubleword (std) stores; %g2:%g3 hold the replicated pattern.
 */
#define ZERO_BIG_BLOCK(base, offset, source)    \
	STORE(source, base, offset, 0x00);	\
	STORE(source, base, offset, 0x08);	\
	STORE(source, base, offset, 0x10);	\
	STORE(source, base, offset, 0x18);	\
	STORE(source, base, offset, 0x20);	\
	STORE(source, base, offset, 0x28);	\
	STORE(source, base, offset, 0x30);	\
	STORE(source, base, offset, 0x38);

/* Store 64 bytes, working backward from (BASE - OFFSET), using SOURCE.
 * Two of these form the 16-entry ladder the computed jump lands in:
 * entering k stores from the end clears exactly 8*k bytes.
 */
#define ZERO_LAST_BLOCKS(base, offset, source)	\
	STORE_LAST(source, base, offset, 0x38);	\
	STORE_LAST(source, base, offset, 0x30);	\
	STORE_LAST(source, base, offset, 0x28);	\
	STORE_LAST(source, base, offset, 0x20);	\
	STORE_LAST(source, base, offset, 0x18);	\
	STORE_LAST(source, base, offset, 0x10);	\
	STORE_LAST(source, base, offset, 0x08);	\
	STORE_LAST(source, base, offset, 0x00);

69 | .text |
70 | .align 4 |
71 | |
72 | .globl __bzero_begin |
73 | __bzero_begin: |
74 | |
75 | .globl __bzero |
76 | .type __bzero,#function |
77 | .globl memset |
78 | EXPORT_SYMBOL(__bzero) |
79 | EXPORT_SYMBOL(memset) |
80 | memset: |
81 | mov %o0, %g1 |
82 | mov 1, %g4 |
83 | and %o1, 0xff, %g3 |
84 | sll %g3, 8, %g2 |
85 | or %g3, %g2, %g3 |
86 | sll %g3, 16, %g2 |
87 | or %g3, %g2, %g3 |
88 | b 1f |
89 | mov %o2, %o1 |
90 | 3: |
91 | cmp %o2, 3 |
92 | be 2f |
93 | EX(stb %g3, [%o0], sub %o1, 0) |
94 | |
95 | cmp %o2, 2 |
96 | be 2f |
97 | EX(stb %g3, [%o0 + 0x01], sub %o1, 1) |
98 | |
99 | EX(stb %g3, [%o0 + 0x02], sub %o1, 2) |
100 | 2: |
101 | sub %o2, 4, %o2 |
102 | add %o1, %o2, %o1 |
103 | b 4f |
104 | sub %o0, %o2, %o0 |
105 | |
/* __bzero: %o0 = dst, %o1 = length.  Returns 0, or (via the EX/.fixup
 * stubs) the number of bytes not yet cleared when a fault occurs and we
 * were called as clear_user.  Shares all code below with memset.
 */
__bzero:
	clr	%g4			/* flag: bzero/clear_user return convention */
	mov	%g0, %g3		/* fill pattern = 0 */
1:
	cmp	%o1, 7
	bleu	7f			/* tiny (<= 7 bytes): byte loop */
	 andcc	%o0, 3, %o2		/* delay: %o2 = dst & 3 */

	bne	3b			/* unaligned: store 1-3 bytes first */
4:
	 andcc	%o0, 4, %g0		/* delay: dst 8-byte aligned? */

	be	2f
	 mov	%g3, %g2		/* delay: %g2:%g3 = 64-bit pattern for std */

	EX(st	%g3, [%o0], sub %o1, 0)	/* one word to reach 8-byte alignment */
	sub	%o1, 4, %o1
	add	%o0, 4, %o0
2:
	andcc	%o1, 0xffffff80, %o3	! Now everything is 8 aligned and o1 is len to run
	be	9f
	 andcc	%o1, 0x78, %o2		/* delay: 8..120-byte remainder (8-byte units) */
10:
	ZERO_BIG_BLOCK(%o0, 0x00, %g2)
	subcc	%o3, 128, %o3
	ZERO_BIG_BLOCK(%o0, 0x40, %g2)
	bne	10b
	 add	%o0, 128, %o0

	orcc	%o2, %g0, %g0		/* set condition codes on the remainder */
9:
	be	13f
	 andcc	%o1, 7, %o1		/* delay: %o1 = final 0-7 byte tail */

	/* Computed jump into the 16-std ladder: each skipped std is 4
	 * bytes of code and clears 8 bytes of data, so backing off 13f
	 * by %o2/2 bytes enters the ladder at exactly the right store.
	 */
	srl	%o2, 1, %o3
	set	13f, %o4
	sub	%o4, %o3, %o4
	jmp	%o4
	 add	%o0, %o2, %o0		/* delay: dst += %o2; ladder stores backward */

	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
	be	8f			/* tail == 0: nothing left to store */
	 andcc	%o1, 4, %g0

	be	1f
	 andcc	%o1, 2, %g0

	EX(st	%g3, [%o0], and %o1, 7)	/* fault: tail length still unset */
	add	%o0, 4, %o0
1:
	be	1f
	 andcc	%o1, 1, %g0

	EX(sth	%g3, [%o0], and %o1, 3)
	add	%o0, 2, %o0
1:
	bne,a	8f			/* annulled: stb executes only if taken */
	 EX(stb	%g3, [%o0], and %o1, 1)
8:
	b	0f
	 nop
/* 7: short (<= 7 byte) case.  Flags are from "andcc %o0, 3, %o2". */
7:
	be	13b			/* dst word-aligned: use word/half/byte tail */
	 orcc	%o1, 0, %g0		/* delay: set flags on length */

	be	0f			/* length == 0: done */
8:
	 add	%o0, 1, %o0		/* delay slot doubles as loop body */
	subcc	%o1, 1, %o1
	bne	8b
	 EX(stb	%g3, [%o0 - 1], add %o1, 1)
0:
	andcc	%g4, 1, %g0		/* memset or bzero return convention? */
	be	5f
	 nop
	retl
	 mov	%g1, %o0		/* memset: return original dst */
5:
	retl
	 clr	%o0			/* bzero/clear_user: return 0 */

/* Landing pad for faults inside the 128-byte loop (see STORE). */
	.section .fixup,#alloc,#execinstr
	.align	4
30:
	and	%o1, 0x7f, %o1		/* sub-128 remainder of the length */
	retl
	 add	%o3, %o1, %o0		/* + bytes left in the 128-byte loop */

	.globl __bzero_end
__bzero_end:
198 | |