1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _ASM_GENERIC_CACHEFLUSH_H |
3 | #define _ASM_GENERIC_CACHEFLUSH_H |
4 | |
5 | #include <linux/instrumented.h> |
6 | |
7 | struct mm_struct; |
8 | struct vm_area_struct; |
9 | struct page; |
10 | struct address_space; |
11 | |
12 | /* |
13 | * The cache doesn't need to be flushed when TLB entries change when |
14 | * the cache is mapped to physical memory, not virtual memory |
15 | */ |
/*
 * Flush the entire CPU cache hierarchy.
 * No-op fallback; an architecture overrides it by defining
 * flush_cache_all as a macro before this header is included.
 */
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif
21 | |
/*
 * Flush all cached user mappings belonging to @mm (e.g. before the
 * address space is torn down). No-op fallback; arch may override.
 */
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif
27 | |
/*
 * Flush caches of @mm when its page tables are about to be duplicated
 * (fork path). No-op fallback; arch may override.
 */
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif
33 | |
/*
 * Flush cached mappings for the user virtual address range
 * [@start, @end) within @vma. No-op fallback; arch may override.
 */
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif
41 | |
/*
 * Flush the cache for one page of @vma: user virtual address @vmaddr,
 * backed by physical page frame @pfn. No-op fallback; arch may override.
 */
#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif
49 | |
/*
 * Flush the data cache for @page. The fallback is a no-op, and
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is set to 0 so generic code can
 * tell at compile time that the architecture provided no real
 * implementation. An arch that does implement it defines the macro
 * (to 1) and its own flush_dcache_page before including this header.
 */
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
57 | |
/*
 * Serialize against flush_dcache_page() walking @mapping's VMAs.
 * No-op fallback; arch may override with a real lock.
 */
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif
63 | |
/*
 * Counterpart to flush_dcache_mmap_lock(). No-op fallback;
 * arch may override.
 */
#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif
69 | |
/*
 * Make instructions written to kernel virtual range [@start, @end)
 * visible to the instruction fetcher. No-op fallback; arch may override.
 */
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif
75 | |
/*
 * By default, flushing an icache range on behalf of userspace is the
 * same operation as flushing a kernel range.
 */
#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif
79 | |
/*
 * Flush the icache for @len bytes at user address @addr of @page in
 * @vma, after the kernel has written instructions into the page (see
 * copy_to_user_page() below). No-op fallback; arch may override.
 */
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
#endif
87 | |
/*
 * Called after a new kernel virtual mapping [@start, @end) is
 * established (vmap/vmalloc). No-op fallback; arch may override.
 */
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif
93 | |
/*
 * Called before a kernel virtual mapping [@start, @end) is torn down
 * (vunmap). No-op fallback; arch may override.
 */
#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif
99 | |
/*
 * Copy @len bytes from kernel buffer @src into @dst, a kernel-side
 * mapping of user @page at user address @vaddr in @vma (used e.g. by
 * access_process_vm / ptrace writes — presumably; confirm against
 * callers). The instrument_copy_to_user() call tells the sanitizer
 * instrumentation (linux/instrumented.h) that this is a write to a
 * user-visible page; the flush_icache_user_page() afterwards makes any
 * newly written instructions visible to the user's instruction fetch.
 *
 * Deliberately a macro (not an inline) so an architecture can replace
 * it wholesale; args may be evaluated more than once.
 */
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		instrument_copy_to_user((void __user *)dst, src, len); \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif
108 | |
109 | |
/*
 * Copy @len bytes from @src, a kernel-side mapping of user @page at
 * @vaddr in @vma, into kernel buffer @dst. The before/after
 * instrumentation hooks (linux/instrumented.h) bracket the memcpy so
 * sanitizers treat @src as user memory; the trailing 0 reports "all
 * bytes copied". No icache flush is needed here since the destination
 * is a plain kernel buffer. @vma/@page/@vaddr are unused by this
 * fallback but kept in the signature for arch overrides.
 */
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len)		  \
	do {								  \
		instrument_copy_from_user_before(dst, (void __user *)src, \
						 len);			  \
		memcpy(dst, src, len);					  \
		instrument_copy_from_user_after(dst, (void __user *)src, len, \
						0);			  \
	} while (0)
#endif
120 | |
121 | #endif /* _ASM_GENERIC_CACHEFLUSH_H */ |
122 | |