/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

typedef unsigned long dax_entry_t;

struct dax_device;
struct gendisk;
struct iomap_ops;
struct iomap_iter;
struct iomap;

enum dax_access_mode {
	DAX_ACCESS,
	DAX_RECOVERY_WRITE,
};

struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			enum dax_access_mode, void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* zero_page_range: required operation. Zero a range of pages. */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
	/*
	 * recovery_write: recover a poisoned range; implemented by DAX
	 * device drivers that are capable of clearing poison.
	 */
	size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
			void *addr, size_t bytes, struct iov_iter *iter);
};
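
/*
 * Illustrative sketch (not part of this header's API surface): a driver that
 * backs a dax_device provides a dax_operations table and hands it to
 * alloc_dax(). The example_* names are hypothetical stand-ins for the
 * driver's own handlers; see drivers/nvdimm/pmem.c for a real instance.
 *
 *	static const struct dax_operations example_dax_ops = {
 *		.direct_access		= example_dax_direct_access,
 *		.zero_page_range	= example_dax_zero_page_range,
 *		.recovery_write		= example_dax_recovery_write,
 *	};
 *
 *	dax_dev = alloc_dax(example_private, &example_dax_ops);
 *
 * Teardown pairs this with kill_dax() followed by put_dax().
 */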

struct dax_holder_operations {
	/*
	 * notify_failure - notify the holder of a memory failure event
	 *                  on the dax device
	 * @dax_dev: the dax device that contains the holder
	 * @offset: offset on this dax device where the memory failure occurred
	 * @len: length of the memory failure event
	 * @mf_flags: action flags for the memory failure handler
	 */
	int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
			u64 len, int mf_flags);
};
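
/*
 * Illustrative sketch (hypothetical names): a filesystem that wants memory
 * failure notifications registers itself as the holder when it looks up the
 * dax_device, typically through fs_dax_get_by_bdev() declared further down:
 *
 *	static const struct dax_holder_operations example_holder_ops = {
 *		.notify_failure	= example_notify_failure,
 *	};
 *
 *	dax_dev = fs_dax_get_by_bdev(bdev, &start_off, holder,
 *				     &example_holder_ops);
 *
 * The same holder cookie is later passed to fs_put_dax() to drop the
 * registration.
 */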

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
void *dax_holder(struct dax_device *dax_dev);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool dax_synchronous(struct dax_device *dax_dev);
void set_dax_synchronous(struct dax_device *dax_dev);
size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
/*
 * Check if the given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
#else
static inline void *dax_holder(struct dax_device *dax_dev)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private,
		const struct dax_operations *ops)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
static inline size_t dax_recovery_write(struct dax_device *dax_dev,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
	return 0;
}
#endif
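
/*
 * Illustrative sketch (hypothetical names): a filesystem ->mmap handler uses
 * daxdev_mapping_supported() to reject MAP_SYNC mappings that the backing
 * dax device cannot honour:
 *
 *	static int example_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct dax_device *dax_dev = example_get_dax_dev(file_inode(file));
 *
 *		if (!daxdev_mapping_supported(vma, dax_dev))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */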

void set_dax_nocache(struct dax_device *dax_dev);
void set_dax_nomc(struct dax_device *dax_dev);

struct writeback_control;
#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
void dax_remove_host(struct gendisk *disk);
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
		void *holder, const struct dax_holder_operations *ops);
void fs_put_dax(struct dax_device *dax_dev, void *holder);
#else
static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
	return 0;
}
static inline void dax_remove_host(struct gendisk *disk)
{
}
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
		u64 *start_off, void *holder,
		const struct dax_holder_operations *ops)
{
	return NULL;
}
static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
{
}
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
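
/*
 * Illustrative sketch (hypothetical names): a block driver that also exposes
 * fsdax ties its dax_device to the gendisk once both exist, and undoes the
 * association on removal:
 *
 *	rc = dax_add_host(example_dax_dev, example_disk);
 *	...
 *	dax_remove_host(example_disk);
 *	kill_dax(example_dax_dev);
 *	put_dax(example_dax_dev);
 *
 * Filesystems, in turn, look up the dax_device from the block device with
 * fs_dax_get_by_bdev() at mount time and release it with fs_put_dax() at
 * unmount.
 */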

#if IS_ENABLED(CONFIG_FS_DAX)
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
dax_entry_t dax_lock_folio(struct folio *folio);
void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
		unsigned long index, struct page **page);
void dax_unlock_mapping_entry(struct address_space *mapping,
		unsigned long index, dax_entry_t cookie);
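
/*
 * Illustrative sketch: dax_lock_folio() returns an opaque cookie, where zero
 * means the folio could not be locked. Callers such as the memory failure
 * path follow a pattern along these lines:
 *
 *	cookie = dax_lock_folio(folio);
 *	if (!cookie)
 *		return -EBUSY;
 *	... act on the locked mapping entry ...
 *	dax_unlock_folio(folio, cookie);
 */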
#else
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_folio(struct folio *folio)
{
	if (IS_DAX(folio->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
}

static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
		unsigned long index, struct page **page)
{
	return 0;
}

static inline void dax_unlock_mapping_entry(struct address_space *mapping,
		unsigned long index, dax_entry_t cookie)
{
}
#endif

int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops);
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops);
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops);

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
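
/*
 * Illustrative sketch: dax_read_lock() enters a read-side critical section
 * that keeps the dax_device's operations safe to call until the matching
 * dax_read_unlock(); the returned id must be handed back. A typical access
 * sequence, assuming nr_pages pages starting at pgoff:
 *
 *	id = dax_read_lock();
 *	nr = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
 *			       &kaddr, NULL);
 *	if (nr > 0)
 *		... read or write through kaddr ...
 *	dax_read_unlock(id);
 */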
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);
int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len,
		int mf_flags);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		unsigned int order, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
		struct inode *dest, loff_t destoff,
		loff_t len, bool *is_same,
		const struct iomap_ops *ops);
int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
		struct file *file_out, loff_t pos_out,
		loff_t *len, unsigned int remap_flags,
		const struct iomap_ops *ops);
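
/*
 * Illustrative sketch (hypothetical names): a filesystem fault handler drives
 * DAX faults through dax_iomap_fault() with its own iomap_ops, and completes
 * synchronous (MAP_SYNC) write faults with dax_finish_sync_fault() once the
 * required metadata has been persisted:
 *
 *	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &example_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, order, pfn);
 */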
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

/*
 * Due to dax's dual memory and block personalities, hwpoison reporting
 * takes into consideration which personality is presently visible.
 * When dax acts like a block device, such as during block I/O, an encounter
 * with hwpoison is reported as -EIO.
 * When dax acts like memory, such as during a page fault, detection of
 * hwpoison is reported as -EHWPOISON, which leads to VM_FAULT_HWPOISON.
 */
static inline int dax_mem2blk_err(int err)
{
	return (err == -EHWPOISON) ? -EIO : err;
}
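
/*
 * Illustrative sketch: a caller on the block-style I/O path converts a
 * poison error from a dax copy before returning it:
 *
 *	if (ret < 0)
 *		return dax_mem2blk_err(ret);
 */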

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_resource(int target_nid, struct resource *r);
#else
static inline void hmem_register_resource(int target_nid, struct resource *r)
{
}
#endif

typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
		const struct resource *res);
int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
#endif