/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/*
 * Even with the __builtin_ prefix the compiler may still decide to emit
 * a call to the out-of-line function instead of expanding it inline.
 */

#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#include <linux/kmsan_string.h>
#endif

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
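/*
 * With CONFIG_CFI_CLANG, KCFI_REFERENCE() marks the asm-implemented
 * symbol as address-taken from C code, so that the compiler emits the
 * KCFI type id which the assembly definition (SYM_TYPED_FUNC_START)
 * relies on.
 */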
KCFI_REFERENCE(__memset);

/*
 * KMSAN needs to instrument as much code as possible. Use the C versions
 * of memset16/32/64() from lib/string.c under KMSAN.
 */
#if !defined(CONFIG_KMSAN)
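/*
 * memset16/32/64() below are thin wrappers around "rep stos{w,l,q}":
 * the destination pointer lives in %rdi ("+D"), the element count in
 * %rcx ("+c") and the fill value in %ax/%eax/%rax ("a"). rep stos
 * advances %rdi and consumes %rcx, hence the "+" read-write constraints
 * and the saved copy of the original pointer for the return value; the
 * "memory" clobber tells the compiler the buffer has been written.
 */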
#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	const __auto_type s0 = s;
	asm volatile (
		"rep stosw"
		: "+D" (s), "+c" (n)
		: "a" (v)
		: "memory"
	);
	return s0;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	const __auto_type s0 = s;
	asm volatile (
		"rep stosl"
		: "+D" (s), "+c" (n)
		: "a" (v)
		: "memory"
	);
	return s0;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	const __auto_type s0 = s;
	asm volatile (
		"rep stosq"
		: "+D" (s), "+c" (n)
		: "a" (v)
		: "memory"
	);
	return s0;
}
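/*
 * Illustrative use (hypothetical variables, not part of this header):
 * filling one scanline of a 16bpp framebuffer. Note that @n counts
 * 16-bit elements, not bytes:
 *
 *	u16 *line = ...;
 *	memset16(line, pixel_value, width_in_pixels);
 */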
#endif /* !CONFIG_KMSAN */

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);
KCFI_REFERENCE(__memmove);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
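/*
 * For small compile-time-constant sizes, memcpy_flushcache() is
 * open-coded with MOVNTI non-temporal stores, which bypass the cache
 * and therefore need no separate cache-line flush; all other sizes
 * fall back to the out-of-line __memcpy_flushcache().
 */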
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m" (*(u32 *)dst) : "r" (*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m" (*(u64 *)dst) : "r" (*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m" (*(u64 *)dst) : "r" (*(u64 *)src));
			asm ("movntiq %1, %0" : "=m" (*(u64 *)(dst + 8)) : "r" (*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
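/*
 * Illustrative use (a hedged sketch, with hypothetical variables):
 * persistent-memory write paths copy with memcpy_flushcache() so the
 * data does not linger dirty in the CPU cache, then fence the
 * non-temporal stores before publishing them:
 *
 *	memcpy_flushcache(pmem_dst, buf, len);
 *	pmem_wmb();	// order the stores before e.g. a valid flag
 */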
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */