1 | #ifndef _LINUX_MMAP_LOCK_H |
2 | #define _LINUX_MMAP_LOCK_H |
3 | |
4 | #include <linux/lockdep.h> |
5 | #include <linux/mm_types.h> |
6 | #include <linux/mmdebug.h> |
7 | #include <linux/rwsem.h> |
8 | #include <linux/tracepoint-defs.h> |
9 | #include <linux/types.h> |
10 | |
/* Static initializer for the mmap_lock field of a named mm_struct. */
#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
13 | |
/*
 * Tracepoints fired around mmap_lock acquisition/release; the inline
 * wrappers below only call into the trace slow path when one of these
 * is enabled.
 */
DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);
17 | |
18 | #ifdef CONFIG_TRACING |
19 | |
/* Out-of-line trace emitters; called only when the tracepoint is enabled. */
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
24 | |
25 | static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm, |
26 | bool write) |
27 | { |
28 | if (tracepoint_enabled(mmap_lock_start_locking)) |
29 | __mmap_lock_do_trace_start_locking(mm, write); |
30 | } |
31 | |
32 | static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm, |
33 | bool write, bool success) |
34 | { |
35 | if (tracepoint_enabled(mmap_lock_acquire_returned)) |
36 | __mmap_lock_do_trace_acquire_returned(mm, write, success); |
37 | } |
38 | |
39 | static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) |
40 | { |
41 | if (tracepoint_enabled(mmap_lock_released)) |
42 | __mmap_lock_do_trace_released(mm, write); |
43 | } |
44 | |
45 | #else /* !CONFIG_TRACING */ |
46 | |
/* No-op stubs so the lock wrappers below compile away the trace calls. */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}
60 | |
61 | #endif /* CONFIG_TRACING */ |
62 | |
/* Initialize @mm's mmap_lock rwsem; call once before the lock is used. */
static inline void mmap_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_lock);
}
67 | |
/*
 * Acquire the mmap_lock for writing, sleeping until it is available.
 * Tracepoints bracket the wait so lock contention can be measured.
 */
static inline void mmap_write_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}
74 | |
/*
 * Like mmap_write_lock(), but with an explicit lockdep @subclass for
 * callers that legitimately nest two mmap_locks (e.g. of distinct mms).
 */
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write_nested(&mm->mmap_lock, subclass);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}
81 | |
82 | static inline int mmap_write_lock_killable(struct mm_struct *mm) |
83 | { |
84 | int ret; |
85 | |
86 | __mmap_lock_trace_start_locking(mm, true); |
87 | ret = down_write_killable(&mm->mmap_lock); |
88 | __mmap_lock_trace_acquire_returned(mm, true, ret == 0); |
89 | return ret; |
90 | } |
91 | |
92 | static inline bool mmap_write_trylock(struct mm_struct *mm) |
93 | { |
94 | bool ret; |
95 | |
96 | __mmap_lock_trace_start_locking(mm, true); |
97 | ret = down_write_trylock(&mm->mmap_lock) != 0; |
98 | __mmap_lock_trace_acquire_returned(mm, true, ret); |
99 | return ret; |
100 | } |
101 | |
/* Release a write hold on the mmap_lock (trace fires before the release). */
static inline void mmap_write_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, true);
	up_write(&mm->mmap_lock);
}
107 | |
/*
 * Atomically downgrade a held write lock to a read lock.  The resulting
 * read hold is reported via the acquire tracepoint (write=false) before
 * the downgrade takes effect.
 */
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	__mmap_lock_trace_acquire_returned(mm, false, true);
	downgrade_write(&mm->mmap_lock);
}
113 | |
/*
 * Acquire the mmap_lock for reading, sleeping until it is available.
 * Tracepoints bracket the wait so lock contention can be measured.
 */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, false);
	down_read(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, true);
}
120 | |
121 | static inline int mmap_read_lock_killable(struct mm_struct *mm) |
122 | { |
123 | int ret; |
124 | |
125 | __mmap_lock_trace_start_locking(mm, false); |
126 | ret = down_read_killable(&mm->mmap_lock); |
127 | __mmap_lock_trace_acquire_returned(mm, false, ret == 0); |
128 | return ret; |
129 | } |
130 | |
131 | static inline bool mmap_read_trylock(struct mm_struct *mm) |
132 | { |
133 | bool ret; |
134 | |
135 | __mmap_lock_trace_start_locking(mm, false); |
136 | ret = down_read_trylock(&mm->mmap_lock) != 0; |
137 | __mmap_lock_trace_acquire_returned(mm, false, ret); |
138 | return ret; |
139 | } |
140 | |
/* Release a read hold on the mmap_lock (trace fires before the release). */
static inline void mmap_read_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read(&mm->mmap_lock);
}
146 | |
/*
 * Release a read hold taken by a different task, bypassing the rwsem
 * owner bookkeeping (up_read_non_owner()).
 */
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read_non_owner(&mm->mmap_lock);
}
152 | |
/*
 * Assert the mmap_lock is held (read or write): lockdep check when
 * lockdep is on, plus a VM_BUG_ON_MM sanity check on the rwsem state.
 */
static inline void mmap_assert_locked(struct mm_struct *mm)
{
	lockdep_assert_held(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}
158 | |
/*
 * Assert the mmap_lock is held for writing.  Only lockdep distinguishes
 * write holds; the VM_BUG_ON_MM fallback can just check "locked at all".
 */
static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}
164 | |
/*
 * Return nonzero if another task is waiting on the mmap_lock; used by
 * long-running lock holders to decide when to drop and reacquire it.
 */
static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
	return rwsem_is_contended(&mm->mmap_lock);
}
169 | |
170 | #endif /* _LINUX_MMAP_LOCK_H */ |
171 | |