// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

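/*
 * Op flags for a direct write.  Marking O_DSYNC writes REQ_FUA means the
 * data is durable once the bio completes, so no separate flush (and thus
 * no completion work item) is needed afterwards.
 */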
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

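/*
 * Direct I/O on a block device must be logical-block aligned, both in the
 * starting position and in the memory segments of the iterator.
 */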
static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
		struct iov_iter *iter)
{
	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

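/*
 * Synchronous fast path for I/O that fits in a single bio: the bio lives
 * on the stack, and the bio_vec array is inline as well for requests of
 * up to DIO_INLINE_BIO_VECS segments.
 */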
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

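/*
 * Completion state for the multi-bio direct I/O path.  The structure is
 * embedded in the first bio (allocated from blkdev_dio_pool) and holds one
 * reference per in-flight bio plus one for the submitter, so that it stays
 * alive until the last completion.
 */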
enum {
	DIO_SHOULD_DIRTY = 1,
	DIO_IS_SYNC = 2,
};

struct blkdev_dio {
	union {
		struct kiocb *iocb;
		struct task_struct *waiter;
	};
	size_t size;
	atomic_t ref;
	unsigned int flags;
	struct bio bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

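/*
 * Per-bio completion for the multi-bio path: the first error seen is
 * latched into the parent bio, and the final reference drop either
 * completes the async iocb or wakes the synchronous waiter.
 */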
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

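/*
 * Slow path for requests spanning more than one bio: map the iterator into
 * BIO_MAX_VECS-sized bios in a loop under a plug, with completions
 * coalesced by blkdev_bio_end_io().
 */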
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is embedded
	 * into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map. As we
			 * cannot guarantee that one of the sub bios will not
			 * fail getting issued FOR NOWAIT and as error results
			 * are coalesced across all of them, be safe and ask for
			 * a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				bio_release_pages(bio, false);
				bio_clear_flag(bio, BIO_REFFED);
				bio_put(bio);
				blk_finish_plug(&plug);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

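/*
 * Completion for the single-bio async path, where the dio is embedded in
 * the only bio and no reference counting is needed.
 */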
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

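/*
 * Async fast path for I/O that fits in a single bio, including polled
 * (IOCB_HIPRI) submissions where the bio is stashed in iocb->private for
 * iocb_bio_iopoll().
 */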
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

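/*
 * Requests that fit in a single bio take the sync or async fast path;
 * anything larger goes through the multi-bio slow path.
 */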
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
	}
	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

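/*
 * A block device maps trivially: every offset below the device size maps
 * 1:1 to the same disk address, aligned down to the logical block size.
 */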
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	if (iomap->offset >= isize)
		return -EIO;
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin = blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
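/*
 * Block i of a block device file is block i of the device, so get_block
 * is a trivial identity mapping.
 */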
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = blkdev_read_folio,
	.readahead = blkdev_readahead,
	.writepage = blkdev_writepage,
	.write_begin = blkdev_write_begin,
	.write_end = blkdev_write_end,
	.migrate_folio = buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks = blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.read_folio = blkdev_read_folio,
	.readahead = blkdev_readahead,
	.writepages = blkdev_writepages,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
	.migrate_folio = filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we compute
 * the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for files just being opened (e.g. during the
 * ->open callback) and for files that are already open. This is actually
 * non-trivial (see comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;
	struct bdev_handle *handle = file->private_data;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, use handle->mode to
	 * determine whether the open was exclusive for already open files.
	 */
	if (handle)
		mode |= handle->mode & BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the floppy
	 * driver has historically allowed ioctls as if the file was opened for
	 * writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct bdev_handle *handle;
	blk_mode_t mode;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binaries need it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;
	filp->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;

	mode = file_to_blk_mode(filp);
	handle = bdev_open_by_dev(inode->i_rdev, mode,
				  mode & BLK_OPEN_EXCL ? filp : NULL, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (bdev_nowait(handle->bdev))
		filp->f_mode |= FMODE_NOWAIT;

	filp->f_mapping = handle->bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = handle;
	return 0;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp->private_data);
	return 0;
}

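/*
 * Invalidate cached pages over the range before a direct write, and again
 * after a successful one so pages instantiated concurrently don't serve
 * stale data.
 */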
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct inode *bd_inode = bdev->bd_inode;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		ret = blkdev_buffered_write(iocb, from);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

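/*
 * Reads beyond the device size are clamped, not failed: the iterator is
 * truncated to the device and re-expanded afterwards so the caller sees a
 * short read.
 */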
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file),
					    start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file),
					    start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file),
					    start, end);
		if (error)
			goto fail;

		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};

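/*
 * The dio pool fronts each bio with a struct blkdev_dio.
 * BIOSET_PERCPU_CACHE enables the per-cpu bio cache used for
 * REQ_ALLOC_CACHE allocations above.
 */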
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);