/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H


#ifdef __KERNEL__

#include <asm/nops.h>
#include <asm/processor-flags.h>
#include <linux/irqflags.h>
#include <linux/jump_label.h>

/*
 * The compiler should not reorder volatile asm statements with respect to each
 * other: they should execute in program order. However GCC 4.9.x and 5.x have
 * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
 * volatile asm. The write functions are not affected since they have memory
 * clobbers preventing reordering. To prevent reads from being reordered with
 * respect to writes, use a dummy memory operand.
 */

#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
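
/*
 * Illustrative sketch (not part of the kernel API): with an affected GCC,
 * a CR read could otherwise float above an earlier CR write. Because each
 * write below clobbers "memory" and each read takes the dummy operand as
 * an input, the compiler must keep a sequence like this in program order:
 *
 *	native_write_cr3(new_cr3);	// has a "memory" clobber
 *	cr3 = __native_read_cr3();	// reads the __FORCE_ORDER dummy
 */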

void native_write_cr0(unsigned long val);

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
	return val;
}

static __always_inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
	return val;
}

static __always_inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
}

static inline unsigned long __native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
#ifdef CONFIG_X86_32
	/*
	 * This could fault if CR4 does not exist. Non-existent CR4
	 * is functionally equivalent to CR4 == 0. Keep it simple and pretend
	 * that CR4 == 0 on CPUs that don't have CR4: the exception table
	 * entry resumes execution at 2:, so a fault leaves the preloaded 0
	 * (from the "0" (0) input constraint) in val.
	 */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val) : "0" (0), __FORCE_ORDER);
#else
	/* CR4 always exists on x86_64. */
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
#endif
	return val;
}

void native_write_cr4(unsigned long val);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 rdpkru(void)
{
	u32 ecx = 0;
	u32 edx, pkru;

	/*
	 * "rdpkru" instruction. Places PKRU contents into EAX,
	 * clears EDX and requires that ecx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}

static inline void wrpkru(u32 pkru)
{
	u32 ecx = 0, edx = 0;

	/*
	 * "wrpkru" instruction. Loads contents in EAX to PKRU,
	 * requires that ecx = edx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(ecx), "d"(edx));
}
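
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * PKRU holds two bits per protection key, Access Disable at bit
 * 2 * pkey and Write Disable at bit 2 * pkey + 1. Making a key
 * read-only could look roughly like:
 *
 *	static inline void pkey_deny_write(int pkey)
 *	{
 *		u32 pkru = rdpkru();
 *
 *		pkru |= 1u << (2 * pkey + 1);	// set Write Disable
 *		wrpkru(pkru);
 *	}
 */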

#else
static inline u32 rdpkru(void)
{
	return 0;
}

static inline void wrpkru(u32 pkru)
{
}
#endif

static __always_inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static __always_inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static __always_inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

/*
 * Careful! CR3 contains more than just an address. You probably want
 * read_cr3_pa() instead.
 */
static inline unsigned long __read_cr3(void)
{
	return __native_read_cr3();
}
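
/*
 * Illustrative sketch: read_cr3_pa() (see <asm/processor.h>) strips the
 * PCID and flag bits to yield just the page-table base, roughly:
 *
 *	return __read_cr3() & CR3_ADDR_MASK;
 */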

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline void __write_cr4(unsigned long x)
{
	native_write_cr4(x);
}

static __always_inline void wbinvd(void)
{
	native_wbinvd();
}

#endif /* CONFIG_PARAVIRT_XXL */

static __always_inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

static inline void clflushopt(volatile void *__p)
{
	alternative_io(".byte 0x3e; clflush %P0",
		       ".byte 0x66; clflush %P0",
		       X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
}

static inline void clwb(volatile void *__p)
{
	volatile struct { char x[64]; } *p = __p;

	asm volatile(ALTERNATIVE_2(
		".byte 0x3e; clflush (%[pax])",
		".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
		X86_FEATURE_CLFLUSHOPT,
		".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */
		X86_FEATURE_CLWB)
		: [p] "+m" (*p)
		: [pax] "a" (p));
}
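
/*
 * Illustrative sketch (hypothetical helper): writing back a whole buffer
 * one cache line at a time. 64-byte lines are assumed here for brevity;
 * real code should use the actual boot_cpu_data.x86_clflush_size:
 *
 *	static void wb_buffer(void *addr, size_t size)
 *	{
 *		unsigned long p = (unsigned long)addr & ~63UL;
 *
 *		for (; p < (unsigned long)addr + size; p += 64)
 *			clwb((void *)p);
 *	}
 */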

#ifdef CONFIG_X86_USER_SHADOW_STACK
/*
 * Write a 64-bit value to a user shadow-stack page with WRUSSQ. A
 * faulting access is fixed up via the exception table and reported
 * as -EFAULT through the fail label.
 */
static inline int write_user_shstk_64(u64 __user *addr, u64 val)
{
	asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
			  _ASM_EXTABLE(1b, %l[fail])
			  :: [addr] "r" (addr), [val] "r" (val)
			  :: fail);
	return 0;
fail:
	return -EFAULT;
}
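
/*
 * Illustrative sketch (hypothetical caller): pushing a token onto a
 * user shadow stack, propagating -EFAULT on a faulting access:
 *
 *	if (write_user_shstk_64((u64 __user *)ssp, token))
 *		return -EFAULT;
 */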
#endif /* CONFIG_X86_USER_SHADOW_STACK */

#define nop() asm volatile ("nop")

static inline void serialize(void)
{
	/* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */
	asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
}
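
/*
 * Illustrative sketch: sync_core() prefers SERIALIZE over the legacy
 * IRET-to-self trick when the CPU supports it, roughly:
 *
 *	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
 *		serialize();
 *		return;
 *	}
 */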

/* The dst parameter must be 64-byte aligned. */
static inline void movdir64b(void __iomem *dst, const void *src)
{
	const struct { char _[64]; } *__src = src;
	struct { char _[64]; } __iomem *__dst = dst;

	/*
	 * MOVDIR64B %(rdx), rax.
	 *
	 * Both __src and __dst must be memory constraints in order to tell the
	 * compiler that no other memory accesses should be reordered around
	 * this one.
	 *
	 * Also, both must be supplied as lvalues because this tells
	 * the compiler what the object is (its size) the instruction accesses.
	 * I.e., not the pointers but what they point to, thus the deref'ing '*'.
	 */
	asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
		     : "+m" (*__dst)
		     : "m" (*__src), "a" (__dst), "d" (__src));
}
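
/*
 * Illustrative sketch (hypothetical caller): posting a 64-byte work
 * descriptor to a device portal with one non-torn 64-byte store:
 *
 *	struct my_desc desc;		// hypothetical 64-byte type
 *
 *	BUILD_BUG_ON(sizeof(desc) != 64);
 *	fill_desc(&desc);		// hypothetical helper
 *	movdir64b(portal, &desc);	// portal: 64-byte-aligned MMIO
 */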

/**
 * enqcmds - Enqueue a command in supervisor (CPL0) mode
 * @dst: destination, in MMIO space (must be 512-bit aligned)
 * @src: 512-bit memory operand
 *
 * The ENQCMDS instruction allows software to write a 512-bit command to
 * a 512-bit-aligned special MMIO region that supports the instruction.
 * A return status is loaded into the ZF flag in the RFLAGS register.
 * ZF = 0 equates to success, and ZF = 1 indicates retry or error.
 *
 * This function issues the ENQCMDS instruction to submit data from
 * kernel space to MMIO space, in a unit of 512 bits. Order of data access
 * is not guaranteed, nor is a memory barrier performed afterwards. It
 * returns 0 on success and -EAGAIN on failure.
 *
 * Warning: Do not use this helper unless your driver has checked that the
 * ENQCMDS instruction is supported on the platform and the device accepts
 * ENQCMDS.
 */
static inline int enqcmds(void __iomem *dst, const void *src)
{
	const struct { char _[64]; } *__src = src;
	struct { char _[64]; } __iomem *__dst = dst;
	bool zf;

	/*
	 * ENQCMDS %(rdx), rax
	 *
	 * See movdir64b()'s comment on operand specification.
	 */
	asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90"
		     CC_SET(z)
		     : CC_OUT(z) (zf), "+m" (*__dst)
		     : "m" (*__src), "a" (__dst), "d" (__src));

	/* Submission failure is indicated via EFLAGS.ZF=1 */
	if (zf)
		return -EAGAIN;

	return 0;
}
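
/*
 * Illustrative sketch (hypothetical caller): since ENQCMDS can fail
 * transiently when the device queue is full, retry a bounded number
 * of times before giving up:
 *
 *	int i, ret = -EAGAIN;
 *
 *	for (i = 0; i < ENQ_RETRIES && ret == -EAGAIN; i++)
 *		ret = enqcmds(portal, &desc);	// ENQ_RETRIES: hypothetical
 */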

static __always_inline void tile_release(void)
{
	/*
	 * Instruction opcode for TILERELEASE; supported in binutils
	 * version >= 2.36.
	 */
	asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0");
}

#endif /* __KERNEL__ */

#endif /* _ASM_X86_SPECIAL_INSNS_H */