/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif
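
/*
 * Illustrative use (hypothetical struct name): round an object size up to
 * a whole number of L1 cachelines, e.g. when carving fixed-size slots out
 * of a larger buffer:
 *
 *	size_t slot_size = L1_CACHE_ALIGN(sizeof(struct foo_desc));
 */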

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif

/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is read
 * frequently in hot paths and written rarely. Performance traces can help
 * decide when to use it. You want __read_mostly data to be tightly packed,
 * so that in the best case multiple frequently read variables for a hot
 * path sit next to each other, reducing the number of cachelines needed to
 * execute a critical path. Be mindful and selective in its use, i.e. if you
 * are going to use it, please supply a *good* justification in your
 * commit log.
 */
#ifndef __read_mostly
#define __read_mostly
#endif
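
/*
 * Illustrative use (hypothetical variable name): a tunable that is read on
 * every packet in the fast path but written only via sysctl:
 *
 *	static int sysctl_foo_enabled __read_mostly;
 */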

/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __section(".data..ro_after_init")
#endif
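
/*
 * Illustrative use (hypothetical names): an ops pointer selected once
 * during boot and never written again:
 *
 *	static struct foo_ops *active_ops __ro_after_init;
 *
 *	static int __init foo_init(void)
 *	{
 *		active_ops = foo_have_hw() ? &foo_hw_ops : &foo_sw_ops;
 *		return 0;
 *	}
 */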

#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

#ifndef __cacheline_aligned
#define __cacheline_aligned					\
	__attribute__((__aligned__(SMP_CACHE_BYTES),		\
		__section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
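
/*
 * Illustrative use (hypothetical struct): give a heavily contended lock its
 * own cacheline so it does not false-share with neighbouring fields; the
 * _in_smp variants compile away on !CONFIG_SMP builds:
 *
 *	struct foo_queue {
 *		struct list_head	head;
 *		spinlock_t		lock ____cacheline_aligned_in_smp;
 *	};
 */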

/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes or the L3 cacheline
 * size, etc. Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#ifndef ____cacheline_internodealigned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif
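
/*
 * Illustrative use (hypothetical type): per-node state polled by remote
 * nodes; internode alignment avoids cross-node false sharing:
 *
 *	struct foo_node_stats {
 *		atomic_long_t	events;
 *	} ____cacheline_internodealigned_in_smp;
 */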

#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size() L1_CACHE_BYTES
#endif
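
/*
 * Illustrative use: size an allocation to the runtime cacheline size;
 * arches selecting CONFIG_ARCH_HAS_CACHE_LINE_SIZE may probe it at boot,
 * hence the function-call spelling:
 *
 *	buf = kmalloc(cache_line_size(), GFP_KERNEL);
 */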

#ifndef __cacheline_group_begin
#define __cacheline_group_begin(GROUP) \
	__u8 __cacheline_group_begin__##GROUP[0]
#endif

#ifndef __cacheline_group_end
#define __cacheline_group_end(GROUP) \
	__u8 __cacheline_group_end__##GROUP[0]
#endif

#ifndef CACHELINE_ASSERT_GROUP_MEMBER
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
	BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
		       offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
		       offsetofend(TYPE, MEMBER) <= \
		       offsetof(TYPE, __cacheline_group_end__##GROUP)))
#endif

#ifndef CACHELINE_ASSERT_GROUP_SIZE
#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
	BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
		     offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
		     SIZE)
#endif
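
/*
 * Illustrative use (hypothetical struct and fields): mark the fields
 * touched on transmit as one group, then fail the build if the group is
 * reordered or outgrows a single cacheline. BUILD_BUG_ON() needs function
 * scope, so the asserts live in an init-time check:
 *
 *	struct foo_sock {
 *		__cacheline_group_begin(tx);
 *		u32	snd_una;
 *		u32	snd_nxt;
 *		__cacheline_group_end(tx);
 *	};
 *
 *	static void __init foo_struct_check(void)
 *	{
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct foo_sock, tx, snd_nxt);
 *		CACHELINE_ASSERT_GROUP_SIZE(struct foo_sock, tx, SMP_CACHE_BYTES);
 *	}
 */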

/*
 * Helper to add padding within a struct to ensure data falls into separate
 * cachelines.
 */
#if defined(CONFIG_SMP)
struct cacheline_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name)	struct cacheline_padding name
#else
#define CACHELINE_PADDING(name)
#endif
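
/*
 * Illustrative use (hypothetical struct): keep a read-mostly section and a
 * frequently written counter on different cachelines, so writers do not
 * keep invalidating the readers' line:
 *
 *	struct foo_pool {
 *		const struct foo_params *params;
 *		CACHELINE_PADDING(_pad1_);
 *		unsigned long		alloc_count;
 *	};
 */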

#ifdef ARCH_DMA_MINALIGN
#define ARCH_HAS_DMA_MINALIGN
#else
#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
#endif
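
/*
 * Illustrative use (hypothetical struct): align a buffer that is handed to
 * non-coherent DMA so it cannot share a cacheline with CPU-written fields:
 *
 *	struct foo_dev {
 *		spinlock_t	lock;
 *		u8		dma_buf[64] __aligned(ARCH_DMA_MINALIGN);
 *	};
 */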

#endif /* __LINUX_CACHE_H */