/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-compat.h>

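/*
 * The load/store helpers below all operate on a struct thread_vr_state
 * image: 32 vector registers of 16 bytes each, followed by the VSCR
 * image. VRSTATE_VSCR is the asm-offsets-generated byte offset of that
 * VSCR slot (with this layout, 32 * 16 = 512), which is why the VSCR
 * is reached with indexed accesses such as "lvx v0,r4,r3" below.
 */
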
/*
 * load_vr_state(struct thread_vr_state *vr)
 *
 * Load VMX state from memory into the vector registers, including the
 * VSCR. Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	v0,r4,r3		/* fetch the saved VSCR image */
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */

/*
 * store_vr_state(struct thread_vr_state *vr)
 *
 * Store VMX state into memory, including the VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
	lvx	v0, 0, r3		/* reload v0: it was clobbered as VSCR scratch */
	blr
EXPORT_SYMBOL(store_vr_state)

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up on every
 * context switch (i.e., there is no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	ori	r5,r5,MSR_RI
#endif
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
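	/*
	 * The MSR write above is not necessarily context-synchronizing,
	 * so the isync guarantees that the AltiVec instructions which
	 * follow execute with MSR[VEC] set.
	 */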

	/*
	 * While userspace in general ignores VRSAVE, glibc uses it as a
	 * boolean to optimise userspace context save/restore. Whenever we
	 * take an altivec unavailable exception we must set VRSAVE to
	 * something non-zero. Set it to all 1s. See also the programming
	 * note in the ISA.
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	addi	r5,r2,THREAD
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S_64
	/* _MSR was updated: make the return path reload SRR0/SRR1 */
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
#endif
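	/*
	 * Flag the vector state as in use: THREAD_LOAD_VEC drives the
	 * restore_math() fast path, and THREAD_USED_VR records that this
	 * thread has touched VMX for later context-switch/signal code.
	 */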
	li	r4,1
	stb	r4,THREAD_LOAD_VEC(r5)
	addi	r6,r5,THREAD_VRSTATE
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6		/* bring in the saved VSCR... */
	mtvscr	v0			/* ...and make it live */
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)

/*
 * save_altivec(tsk)
 * Save tsk's vector registers, including the VSCR, to its thread_struct.
 */
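/*
 * The destination defaults to the thread's own vr_state; if the caller
 * has pointed thread.vr_save_area at an alternate buffer, the state is
 * written there instead.
 */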
_GLOBAL(save_altivec)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f			/* use the redirected save area */
	addi	r7,r3,THREAD_VRSTATE	/* default to thread.vr_state */
2:	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	lvx	v0,0,r7			/* reload v0: it was clobbered as VSCR scratch */
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the FP and VMX load paths (load_up_fpu/load_up_altivec), but
 * first check whether those states have already been loaded.
 */
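/*
 * Note the beql+ idiom below: andi./andis. set CR0[eq] when the MSR
 * bit is clear, so the branch-and-link is taken (calling the load
 * routine) only when that state is NOT already live, and falls
 * through when it is.
 */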
_GLOBAL(load_up_vsx)
/* Load the FP and VMX register state if that hasn't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skipped if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skipped if already loaded */

#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	li	r5,MSR_RI
	mtmsrd	r5,1			/* L=1: update only MSR[EE] and MSR[RI] */
#endif

	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	/* _MSR was updated: make the return path reload SRR0/SRR1 */
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
	b	fast_interrupt_return_srr

#endif /* CONFIG_VSX */

/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers. These routines must be called
 * with preempt disabled.
 */
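/*
 * Each emulation routine below implements one 4-element single-precision
 * VMX operation using the scalar FPU: it walks the vector operands four
 * bytes at a time, applying the equivalent scalar instruction under a
 * saved-and-zeroed FPSCR (see fpenable/fpdisable).
 */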
	.data
#ifdef CONFIG_PPC32
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

fpzero:
	.quad	0
fpone:
	.quad	0x3ff0000000000000	/* 1.0 */
fphalf:
	.quad	0x3fe0000000000000	/* 0.5 */

#ifdef CONFIG_PPC_KERNEL_PCREL
#define LDCONST(fr, name)		\
	pla	r11,name@pcrel;		\
	lfd	fr,0(r11)
#else
#define LDCONST(fr, name)		\
	addis	r11,r2,name@toc@ha;	\
	lfd	fr,name@toc@l(r11)
#endif
#endif
	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
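/*
 * fpenable builds a 64-byte stack frame used as follows:
 *	 8(r1)	saved fr31 (holds the caller's FPSCR image)
 *	16(r1)	saved fr1
 *	24(r1)	saved fr0
 *	32(r1)-56(r1)	scratch for callers needing more FPRs
 *		(vmaddfp/vnmsubfp stash fr2 at 32(r1); vrsqrtefp uses
 *		 32(r1)-56(r1) for fr2-fr5)
 */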
SYM_FUNC_START_LOCAL(fpenable)
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10			/* r10 holds the entry MSR for fpdisable */
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31			/* save the caller's FPSCR in fr31 */
	MTFSF_L(fr1)			/* ...then zero the live FPSCR */
	blr
SYM_FUNC_END(fpenable)

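/*
 * Counterpart to fpenable. Every emulation routine below follows the
 * same pattern:
 *
 *	mflr	r12
 *	bl	fpenable
 *	... per-element loop in fr0/fr1 (plus scratch FPRs) ...
 *	b	fpdisable
 *
 * fpdisable restores the caller's FPSCR, FPRs and MSR, pops the frame
 * and returns through the LR value parked in r12.
 */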
fpdisable:
	mtlr	r12			/* return through the LR the caller saved */
	MTFSF_L(fr31)			/* restore the caller's FPSCR */
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10			/* restore the entry MSR */
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point:
 * dst[i] = a[i] + b[i] for i = 0..3 (r3 = dst, r4 = a, r5 = b).
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector subtract, floating point:
 * dst[i] = a[i] - b[i] for i = 0..3 (r3 = dst, r4 = a, r5 = b).
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point:
 * dst[i] = a[i] * c[i] + b[i] for i = 0..3
 * (r3 = dst, r4 = a, r5 = b, r6 = c, i.e. the vmaddfp
 * semantics vD = vA*vC + vB).
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)		/* need a third scratch FPR */
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point:
 * dst[i] = -(a[i] * c[i] - b[i]) for i = 0..3
 * (r3 = dst, r4 = a, r5 = b, r6 = c).
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)		/* need a third scratch FPR */
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate. We just compute 1.0/x with a full
 * divide rather than a hardware estimate.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
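/*
 * The update below is a standard Newton-Raphson step for
 * f(r) = 1/r^2 - s, whose root is r = 1/sqrt(s):
 *
 *	r' = r - f(r)/f'(r) = r + (r * 0.5) * (1 - s * r * r)
 *
 * Each step roughly doubles the number of correct bits in the initial
 * frsqrte estimate, so two steps reach approximately single-precision
 * accuracy.
 */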
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0			/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0		/* r * s */
	fmuls	fr2,fr1,fr5		/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4		/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1		/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0		/* r * s */
	fmuls	fr2,fr1,fr5		/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4		/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1		/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable