// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

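/* Unmap a previously kmapped metadata buffer and reset its mapping state. */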
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

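/* Unmap the buffer and drop the page reference taken by erofs_bread(). */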
void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

/*
 * Derive the block size from inode->i_blkbits to stay compatible with the
 * anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
		  enum erofs_kmap_type type)
{
	struct inode *inode = buf->inode;
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

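/*
 * Bind the buffer to the metadata source of the filesystem: the fscache
 * anonymous inode in fscache mode, or the backing block device inode
 * otherwise.
 */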
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->inode = EROFS_SB(sb)->s_fscache->inode;
	else
		buf->inode = sb->s_bdev->bd_inode;
}

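/* Initialize the buffer and read the metadata block at @blkaddr in one go. */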
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, blkaddr, type);
}

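/*
 * Map a logical offset to a physical extent for flat (uncompressed) inodes,
 * including the optional tail-packing inline block.
 */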
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

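/*
 * Map a logical extent to its physical location: chunk-based inodes are
 * resolved through the on-disk block map or chunk indexes, while all other
 * layouts fall back to flat mode.
 */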
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(sb, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

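/*
 * Translate a filesystem-wide physical address into a (device, offset) pair.
 * Multi-device images are looked up in the device IDR, either by device id
 * or by address range; flat layouts only adjust the offset.
 */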
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev_file ? file_bdev(dif->bdev_file) : NULL;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev_file ?
					      file_bdev(dif->bdev_file) : NULL;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

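/*
 * iomap_begin callback: translate (offset, length) into an iomap extent,
 * reporting holes, inline (metadata-packed) data and mapped ranges.
 */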
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb,
					 erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

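/*
 * iomap_end callback: release the metadata buffer that erofs_iomap_begin()
 * left kmapped for IOMAP_INLINE extents.
 */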
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

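/* iomap operations for uncompressed (raw) data access */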
static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

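/*
 * FIEMAP entry point: compressed inodes are reported through
 * z_erofs_iomap_report_ops when CONFIG_EROFS_FS_ZIP is enabled, and are
 * unsupported otherwise.
 */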
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

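/*
 * read_iter: route DAX reads through dax_iomap_rw(), enforce logical block
 * alignment for direct I/O, and fall back to buffered reads otherwise.
 */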
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
				       unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

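/*
 * DAX mmap: shared writable mappings are rejected since the filesystem is
 * read-only; otherwise install the DAX fault handlers and allow huge pages.
 */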
static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
};