1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. |
4 | * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. |
5 | * |
6 | * This file is released under the GPL. |
7 | */ |
8 | |
9 | #include "dm-core.h" |
10 | #include "dm-rq.h" |
11 | #include "dm-uevent.h" |
12 | #include "dm-ima.h" |
13 | |
14 | #include <linux/init.h> |
15 | #include <linux/module.h> |
16 | #include <linux/mutex.h> |
17 | #include <linux/sched/mm.h> |
18 | #include <linux/sched/signal.h> |
19 | #include <linux/blkpg.h> |
20 | #include <linux/bio.h> |
21 | #include <linux/mempool.h> |
22 | #include <linux/dax.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/idr.h> |
25 | #include <linux/uio.h> |
26 | #include <linux/hdreg.h> |
27 | #include <linux/delay.h> |
28 | #include <linux/wait.h> |
29 | #include <linux/pr.h> |
30 | #include <linux/refcount.h> |
31 | #include <linux/part_stat.h> |
32 | #include <linux/blk-crypto.h> |
33 | #include <linux/blk-crypto-profile.h> |
34 | |
35 | #define DM_MSG_PREFIX "core" |
36 | |
37 | /* |
38 | * Cookies are numeric values sent with CHANGE and REMOVE |
39 | * uevents while resuming, removing or renaming the device. |
40 | */ |
41 | #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" |
42 | #define DM_COOKIE_LENGTH 24 |
43 | |
44 | /* |
45 | * For REQ_POLLED fs bio, this flag is set if we link mapped underlying |
46 | * dm_io into one list, and reuse bio->bi_private as the list head. Before |
47 | * ending this fs bio, we will recover its ->bi_private. |
48 | */ |
49 | #define REQ_DM_POLL_LIST REQ_DRV |
50 | |
51 | static const char *_name = DM_NAME; |
52 | |
53 | static unsigned int major; |
54 | static unsigned int _major; |
55 | |
56 | static DEFINE_IDR(_minor_idr); |
57 | |
58 | static DEFINE_SPINLOCK(_minor_lock); |
59 | |
60 | static void do_deferred_remove(struct work_struct *w); |
61 | |
62 | static DECLARE_WORK(deferred_remove_work, do_deferred_remove); |
63 | |
64 | static struct workqueue_struct *deferred_remove_workqueue; |
65 | |
66 | atomic_t dm_global_event_nr = ATOMIC_INIT(0); |
67 | DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); |
68 | |
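/*
 * Increment the global event counter and wake up anyone waiting on
 * dm_global_eventq.
 */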
69 | void dm_issue_global_event(void) |
70 | { |
71 | atomic_inc(&dm_global_event_nr); |
72 | wake_up(&dm_global_eventq); |
73 | } |
74 | |
75 | DEFINE_STATIC_KEY_FALSE(stats_enabled); |
76 | DEFINE_STATIC_KEY_FALSE(swap_bios_enabled); |
77 | DEFINE_STATIC_KEY_FALSE(zoned_enabled); |
78 | |
79 | /* |
80 | * One of these is allocated (on-stack) per original bio. |
81 | */ |
82 | struct clone_info { |
83 | struct dm_table *map; |
84 | struct bio *bio; |
85 | struct dm_io *io; |
86 | sector_t sector; |
87 | unsigned int sector_count; |
88 | bool is_abnormal_io:1; |
89 | bool submit_as_polled:1; |
90 | }; |
91 | |
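/* Map a clone bio back to its containing dm_target_io. */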
92 | static inline struct dm_target_io *clone_to_tio(struct bio *clone) |
93 | { |
94 | return container_of(clone, struct dm_target_io, clone); |
95 | } |
96 | |
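/*
 * Return the per-bio-data that precedes the clone bio; the offset differs
 * depending on whether the dm_target_io is embedded inside a dm_io.
 */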
97 | void *dm_per_bio_data(struct bio *bio, size_t data_size) |
98 | { |
99 | if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO)) |
100 | return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; |
101 | return (char *)bio - DM_IO_BIO_OFFSET - data_size; |
102 | } |
103 | EXPORT_SYMBOL_GPL(dm_per_bio_data); |
104 | |
105 | struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) |
106 | { |
107 | struct dm_io *io = (struct dm_io *)((char *)data + data_size); |
108 | |
109 | if (io->magic == DM_IO_MAGIC) |
110 | return (struct bio *)((char *)io + DM_IO_BIO_OFFSET); |
111 | BUG_ON(io->magic != DM_TIO_MAGIC); |
112 | return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET); |
113 | } |
114 | EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); |
115 | |
116 | unsigned int dm_bio_get_target_bio_nr(const struct bio *bio) |
117 | { |
118 | return container_of(bio, struct dm_target_io, clone)->target_bio_nr; |
119 | } |
120 | EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); |
121 | |
122 | #define MINOR_ALLOCED ((void *)-1) |
123 | |
124 | #define DM_NUMA_NODE NUMA_NO_NODE |
125 | static int dm_numa_node = DM_NUMA_NODE; |
126 | |
127 | #define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) |
128 | static int swap_bios = DEFAULT_SWAP_BIOS; |
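/* Read the swap_bios module parameter, falling back to the default if it is invalid. */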
129 | static int get_swap_bios(void) |
130 | { |
131 | int latch = READ_ONCE(swap_bios); |
132 | |
133 | if (unlikely(latch <= 0)) |
134 | latch = DEFAULT_SWAP_BIOS; |
135 | return latch; |
136 | } |
137 | |
138 | struct table_device { |
139 | struct list_head list; |
140 | refcount_t count; |
141 | struct dm_dev dm_dev; |
142 | }; |
143 | |
144 | /* |
145 | * Bio-based DM's mempools' reserved IOs set by the user. |
146 | */ |
147 | #define RESERVED_BIO_BASED_IOS 16 |
148 | static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; |
149 | |
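/* Clamp an int module parameter to [min, max], updating the stored value if needed. */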
150 | static int __dm_get_module_param_int(int *module_param, int min, int max) |
151 | { |
152 | int param = READ_ONCE(*module_param); |
153 | int modified_param = 0; |
154 | bool modified = true; |
155 | |
156 | if (param < min) |
157 | modified_param = min; |
158 | else if (param > max) |
159 | modified_param = max; |
160 | else |
161 | modified = false; |
162 | |
163 | if (modified) { |
164 | (void)cmpxchg(module_param, param, modified_param); |
165 | param = modified_param; |
166 | } |
167 | |
168 | return param; |
169 | } |
170 | |
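/* Unsigned variant: 0 selects the default and values above max are capped. */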
171 | unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max) |
172 | { |
173 | unsigned int param = READ_ONCE(*module_param); |
174 | unsigned int modified_param = 0; |
175 | |
176 | if (!param) |
177 | modified_param = def; |
178 | else if (param > max) |
179 | modified_param = max; |
180 | |
181 | if (modified_param) { |
182 | (void)cmpxchg(module_param, param, modified_param); |
183 | param = modified_param; |
184 | } |
185 | |
186 | return param; |
187 | } |
188 | |
189 | unsigned int dm_get_reserved_bio_based_ios(void) |
190 | { |
191 | return __dm_get_module_param(&reserved_bio_based_ios, |
192 | RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); |
193 | } |
194 | EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); |
195 | |
196 | static unsigned int dm_get_numa_node(void) |
197 | { |
198 | return __dm_get_module_param_int(&dm_numa_node, |
199 | DM_NUMA_NODE, num_online_nodes() - 1); |
200 | } |
201 | |
202 | static int __init local_init(void) |
203 | { |
204 | int r; |
205 | |
206 | r = dm_uevent_init(); |
207 | if (r) |
208 | return r; |
209 | |
210 | deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0); |
211 | if (!deferred_remove_workqueue) { |
212 | r = -ENOMEM; |
213 | goto out_uevent_exit; |
214 | } |
215 | |
216 | _major = major; |
217 | r = register_blkdev(_major, _name); |
218 | if (r < 0) |
219 | goto out_free_workqueue; |
220 | |
221 | if (!_major) |
222 | _major = r; |
223 | |
224 | return 0; |
225 | |
226 | out_free_workqueue: |
227 | destroy_workqueue(deferred_remove_workqueue); |
228 | out_uevent_exit: |
229 | dm_uevent_exit(); |
230 | |
231 | return r; |
232 | } |
233 | |
234 | static void local_exit(void) |
235 | { |
236 | destroy_workqueue(deferred_remove_workqueue); |
237 | |
238 | unregister_blkdev(_major, _name); |
239 | dm_uevent_exit(); |
240 | |
241 | _major = 0; |
242 | |
243 | DMINFO("cleaned up"); |
244 | } |
245 | |
246 | static int (*_inits[])(void) __initdata = { |
247 | local_init, |
248 | dm_target_init, |
249 | dm_linear_init, |
250 | dm_stripe_init, |
251 | dm_io_init, |
252 | dm_kcopyd_init, |
253 | dm_interface_init, |
254 | dm_statistics_init, |
255 | }; |
256 | |
257 | static void (*_exits[])(void) = { |
258 | local_exit, |
259 | dm_target_exit, |
260 | dm_linear_exit, |
261 | dm_stripe_exit, |
262 | dm_io_exit, |
263 | dm_kcopyd_exit, |
264 | dm_interface_exit, |
265 | dm_statistics_exit, |
266 | }; |
267 | |
268 | static int __init dm_init(void) |
269 | { |
270 | const int count = ARRAY_SIZE(_inits); |
271 | int r, i; |
272 | |
273 | #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) |
274 | DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled." |
275 | " Duplicate IMA measurements will not be recorded in the IMA log."); |
276 | #endif |
277 | |
278 | for (i = 0; i < count; i++) { |
279 | r = _inits[i](); |
280 | if (r) |
281 | goto bad; |
282 | } |
283 | |
284 | return 0; |
285 | bad: |
286 | while (i--) |
287 | _exits[i](); |
288 | |
289 | return r; |
290 | } |
291 | |
292 | static void __exit dm_exit(void) |
293 | { |
294 | int i = ARRAY_SIZE(_exits); |
295 | |
296 | while (i--) |
297 | _exits[i](); |
298 | |
299 | /* |
300 | * Should be empty by this point. |
301 | */ |
302 | idr_destroy(&_minor_idr); |
303 | } |
304 | |
305 | /* |
306 | * Block device functions |
307 | */ |
308 | int dm_deleting_md(struct mapped_device *md) |
309 | { |
310 | return test_bit(DMF_DELETING, &md->flags); |
311 | } |
312 | |
313 | static int dm_blk_open(struct gendisk *disk, blk_mode_t mode) |
314 | { |
315 | struct mapped_device *md; |
316 | |
317 | spin_lock(&_minor_lock); |
318 | |
319 | md = disk->private_data; |
320 | if (!md) |
321 | goto out; |
322 | |
323 | if (test_bit(DMF_FREEING, &md->flags) || |
324 | dm_deleting_md(md)) { |
325 | md = NULL; |
326 | goto out; |
327 | } |
328 | |
329 | dm_get(md); |
330 | atomic_inc(&md->open_count); |
331 | out: |
332 | spin_unlock(&_minor_lock); |
333 | |
334 | return md ? 0 : -ENXIO; |
335 | } |
336 | |
337 | static void dm_blk_close(struct gendisk *disk) |
338 | { |
339 | struct mapped_device *md; |
340 | |
341 | spin_lock(&_minor_lock); |
342 | |
343 | md = disk->private_data; |
344 | if (WARN_ON(!md)) |
345 | goto out; |
346 | |
347 | if (atomic_dec_and_test(&md->open_count) && |
348 | (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) |
349 | queue_work(deferred_remove_workqueue, &deferred_remove_work); |
350 | |
351 | dm_put(md); |
352 | out: |
353 | spin_unlock(&_minor_lock); |
354 | } |
355 | |
356 | int dm_open_count(struct mapped_device *md) |
357 | { |
358 | return atomic_read(&md->open_count); |
359 | } |
360 | |
361 | /* |
362 | * Guarantees nothing is using the device before it's deleted. |
363 | */ |
364 | int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) |
365 | { |
366 | int r = 0; |
367 | |
368 | spin_lock(&_minor_lock); |
369 | |
370 | if (dm_open_count(md)) { |
371 | r = -EBUSY; |
372 | if (mark_deferred) |
373 | set_bit(DMF_DEFERRED_REMOVE, &md->flags); |
374 | } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) |
375 | r = -EEXIST; |
376 | else |
377 | set_bit(DMF_DELETING, &md->flags); |
378 | |
379 | spin_unlock(&_minor_lock); |
380 | |
381 | return r; |
382 | } |
383 | |
384 | int dm_cancel_deferred_remove(struct mapped_device *md) |
385 | { |
386 | int r = 0; |
387 | |
388 | spin_lock(&_minor_lock); |
389 | |
390 | if (test_bit(DMF_DELETING, &md->flags)) |
391 | r = -EBUSY; |
392 | else |
393 | clear_bit(DMF_DEFERRED_REMOVE, &md->flags); |
394 | |
395 | spin_unlock(&_minor_lock); |
396 | |
397 | return r; |
398 | } |
399 | |
400 | static void do_deferred_remove(struct work_struct *w) |
401 | { |
402 | dm_deferred_remove(); |
403 | } |
404 | |
405 | static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
406 | { |
407 | struct mapped_device *md = bdev->bd_disk->private_data; |
408 | |
409 | return dm_get_geometry(md, geo); |
410 | } |
411 | |
412 | static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, |
413 | struct block_device **bdev) |
414 | { |
415 | struct dm_target *ti; |
416 | struct dm_table *map; |
417 | int r; |
418 | |
419 | retry: |
420 | r = -ENOTTY; |
421 | map = dm_get_live_table(md, srcu_idx); |
422 | if (!map || !dm_table_get_size(map)) |
423 | return r; |
424 | |
425 | /* We only support devices that have a single target */ |
426 | if (map->num_targets != 1) |
427 | return r; |
428 | |
429 | ti = dm_table_get_target(map, 0); |
430 | if (!ti->type->prepare_ioctl) |
431 | return r; |
432 | |
433 | if (dm_suspended_md(md)) |
434 | return -EAGAIN; |
435 | |
436 | r = ti->type->prepare_ioctl(ti, bdev); |
437 | if (r == -ENOTCONN && !fatal_signal_pending(current)) { |
438 | dm_put_live_table(md, *srcu_idx); |
439 | fsleep(10000); |
440 | goto retry; |
441 | } |
442 | |
443 | return r; |
444 | } |
445 | |
446 | static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) |
447 | { |
448 | dm_put_live_table(md, srcu_idx); |
449 | } |
450 | |
451 | static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode, |
452 | unsigned int cmd, unsigned long arg) |
453 | { |
454 | struct mapped_device *md = bdev->bd_disk->private_data; |
455 | int r, srcu_idx; |
456 | |
457 | r = dm_prepare_ioctl(md, &srcu_idx, &bdev); |
458 | if (r < 0) |
459 | goto out; |
460 | |
461 | if (r > 0) { |
462 | /* |
463 | * Target determined this ioctl is being issued against a |
464 | * subset of the parent bdev; require extra privileges. |
465 | */ |
466 | if (!capable(CAP_SYS_RAWIO)) { |
467 | DMDEBUG_LIMIT( |
468 | "%s: sending ioctl %x to DM device without required privilege.", |
469 | current->comm, cmd); |
470 | r = -ENOIOCTLCMD; |
471 | goto out; |
472 | } |
473 | } |
474 | |
475 | if (!bdev->bd_disk->fops->ioctl) |
476 | r = -ENOTTY; |
477 | else |
478 | r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); |
479 | out: |
480 | dm_unprepare_ioctl(md, srcu_idx); |
481 | return r; |
482 | } |
483 | |
484 | u64 dm_start_time_ns_from_clone(struct bio *bio) |
485 | { |
486 | return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time); |
487 | } |
488 | EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); |
489 | |
490 | static inline bool bio_is_flush_with_data(struct bio *bio) |
491 | { |
492 | return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size); |
493 | } |
494 | |
495 | static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio) |
496 | { |
497 | /* |
498 | * If REQ_PREFLUSH set, don't account payload, it will be |
499 | * submitted (and accounted) after this flush completes. |
500 | */ |
501 | if (bio_is_flush_with_data(bio)) |
502 | return 0; |
503 | if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT))) |
504 | return io->sectors; |
505 | return bio_sectors(bio); |
506 | } |
507 | |
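/* Start (end == false) or end (end == true) block core and dm-stats accounting for a dm_io. */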
508 | static void dm_io_acct(struct dm_io *io, bool end) |
509 | { |
510 | struct bio *bio = io->orig_bio; |
511 | |
512 | if (dm_io_flagged(io, DM_IO_BLK_STAT)) { |
513 | if (!end) |
514 | bdev_start_io_acct(bio->bi_bdev, bio_op(bio), |
515 | io->start_time); |
516 | else |
517 | bdev_end_io_acct(bio->bi_bdev, bio_op(bio), |
518 | dm_io_sectors(io, bio), |
519 | io->start_time); |
520 | } |
521 | |
522 | if (static_branch_unlikely(&stats_enabled) && |
523 | unlikely(dm_stats_used(&io->md->stats))) { |
524 | sector_t sector; |
525 | |
526 | if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT))) |
527 | sector = bio_end_sector(bio) - io->sector_offset; |
528 | else |
529 | sector = bio->bi_iter.bi_sector; |
530 | |
531 | dm_stats_account_io(&io->md->stats, bio_data_dir(bio), |
532 | sector, dm_io_sectors(io, bio), |
533 | end, io->start_time, &io->stats_aux); |
534 | } |
535 | } |
536 | |
537 | static void __dm_start_io_acct(struct dm_io *io) |
538 | { |
539 | dm_io_acct(io, false); |
540 | } |
541 | |
542 | static void dm_start_io_acct(struct dm_io *io, struct bio *clone) |
543 | { |
544 | /* |
545 | * Ensure IO accounting is only ever started once. |
546 | */ |
547 | if (dm_io_flagged(io, DM_IO_ACCOUNTED)) |
548 | return; |
549 | |
550 | /* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */ |
551 | if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) { |
552 | dm_io_set_flag(io, DM_IO_ACCOUNTED); |
553 | } else { |
554 | unsigned long flags; |
555 | /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */ |
556 | spin_lock_irqsave(&io->lock, flags); |
557 | if (dm_io_flagged(io, DM_IO_ACCOUNTED)) { |
558 | spin_unlock_irqrestore(&io->lock, flags); |
559 | return; |
560 | } |
561 | dm_io_set_flag(io, DM_IO_ACCOUNTED); |
562 | spin_unlock_irqrestore(&io->lock, flags); |
563 | } |
564 | |
565 | __dm_start_io_acct(io); |
566 | } |
567 | |
568 | static void dm_end_io_acct(struct dm_io *io) |
569 | { |
570 | dm_io_acct(io, true); |
571 | } |
572 | |
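/*
 * Allocate a dm_io (with its embedded dm_target_io and clone bio) from
 * the mapped_device's io_bs bioset and initialize it for the original bio.
 */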
573 | static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask) |
574 | { |
575 | struct dm_io *io; |
576 | struct dm_target_io *tio; |
577 | struct bio *clone; |
578 | |
579 | clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); |
580 | if (unlikely(!clone)) |
581 | return NULL; |
582 | tio = clone_to_tio(clone); |
583 | tio->flags = 0; |
584 | dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); |
585 | tio->io = NULL; |
586 | |
587 | io = container_of(tio, struct dm_io, tio); |
588 | io->magic = DM_IO_MAGIC; |
589 | io->status = BLK_STS_OK; |
590 | |
591 | /* one ref is for submission, the other is for completion */ |
592 | atomic_set(&io->io_count, 2); |
593 | this_cpu_inc(*md->pending_io); |
594 | io->orig_bio = bio; |
595 | io->md = md; |
596 | spin_lock_init(&io->lock); |
597 | io->start_time = jiffies; |
598 | io->flags = 0; |
599 | if (blk_queue_io_stat(md->queue)) |
600 | dm_io_set_flag(io, DM_IO_BLK_STAT); |
601 | |
602 | if (static_branch_unlikely(&stats_enabled) && |
603 | unlikely(dm_stats_used(&md->stats))) |
604 | dm_stats_record_start(&md->stats, &io->stats_aux); |
605 | |
606 | return io; |
607 | } |
608 | |
609 | static void free_io(struct dm_io *io) |
610 | { |
611 | bio_put(&io->tio.clone); |
612 | } |
613 | |
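/*
 * Set up a dm_target_io clone for a target: reuse the clone embedded in
 * ci->io if it is still free, otherwise allocate a new one from the bs bioset.
 */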
614 | static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, |
615 | unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask) |
616 | { |
617 | struct mapped_device *md = ci->io->md; |
618 | struct dm_target_io *tio; |
619 | struct bio *clone; |
620 | |
621 | if (!ci->io->tio.io) { |
622 | /* the dm_target_io embedded in ci->io is available */ |
623 | tio = &ci->io->tio; |
624 | /* alloc_io() already initialized embedded clone */ |
625 | clone = &tio->clone; |
626 | } else { |
627 | clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, |
628 | &md->mempools->bs); |
629 | if (!clone) |
630 | return NULL; |
631 | |
632 | /* REQ_DM_POLL_LIST shouldn't be inherited */ |
633 | clone->bi_opf &= ~REQ_DM_POLL_LIST; |
634 | |
635 | tio = clone_to_tio(clone); |
636 | tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */ |
637 | } |
638 | |
639 | tio->magic = DM_TIO_MAGIC; |
640 | tio->io = ci->io; |
641 | tio->ti = ti; |
642 | tio->target_bio_nr = target_bio_nr; |
643 | tio->len_ptr = len; |
644 | tio->old_sector = 0; |
645 | |
646 | /* Set default bdev, but target must bio_set_dev() before issuing IO */ |
647 | clone->bi_bdev = md->disk->part0; |
648 | if (unlikely(ti->needs_bio_set_dev)) |
649 | bio_set_dev(clone, md->disk->part0); |
650 | |
651 | if (len) { |
652 | clone->bi_iter.bi_size = to_bytes(*len); |
653 | if (bio_integrity(clone)) |
654 | bio_integrity_trim(clone); |
655 | } |
656 | |
657 | return clone; |
658 | } |
659 | |
660 | static void free_tio(struct bio *clone) |
661 | { |
662 | if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) |
663 | return; |
664 | bio_put(clone); |
665 | } |
666 | |
667 | /* |
668 | * Add the bio to the list of deferred io. |
669 | */ |
670 | static void queue_io(struct mapped_device *md, struct bio *bio) |
671 | { |
672 | unsigned long flags; |
673 | |
674 | spin_lock_irqsave(&md->deferred_lock, flags); |
675 | bio_list_add(&md->deferred, bio); |
676 | spin_unlock_irqrestore(&md->deferred_lock, flags); |
677 | queue_work(md->wq, &md->work); |
678 | } |
679 | |
680 | /* |
681 | * Everyone (including functions in this file) should use this |
682 | * function to access the md->map field, and make sure they call |
683 | * dm_put_live_table() when finished. |
684 | */ |
685 | struct dm_table *dm_get_live_table(struct mapped_device *md, |
686 | int *srcu_idx) __acquires(md->io_barrier) |
687 | { |
688 | *srcu_idx = srcu_read_lock(&md->io_barrier); |
689 | |
690 | return srcu_dereference(md->map, &md->io_barrier); |
691 | } |
692 | |
693 | void dm_put_live_table(struct mapped_device *md, |
694 | int srcu_idx) __releases(md->io_barrier) |
695 | { |
696 | srcu_read_unlock(&md->io_barrier, srcu_idx); |
697 | } |
698 | |
699 | void dm_sync_table(struct mapped_device *md) |
700 | { |
701 | synchronize_srcu(&md->io_barrier); |
702 | synchronize_rcu_expedited(); |
703 | } |
704 | |
705 | /* |
706 | * A fast alternative to dm_get_live_table/dm_put_live_table. |
707 | * The caller must not block between these two functions. |
708 | */ |
709 | static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) |
710 | { |
711 | rcu_read_lock(); |
712 | return rcu_dereference(md->map); |
713 | } |
714 | |
715 | static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) |
716 | { |
717 | rcu_read_unlock(); |
718 | } |
719 | |
720 | static char *_dm_claim_ptr = "I belong to device-mapper"; |
721 | |
722 | /* |
723 | * Open a table device so we can use it as a map destination. |
724 | */ |
725 | static struct table_device *open_table_device(struct mapped_device *md, |
726 | dev_t dev, blk_mode_t mode) |
727 | { |
728 | struct table_device *td; |
729 | struct bdev_handle *bdev_handle; |
730 | u64 part_off; |
731 | int r; |
732 | |
733 | td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); |
734 | if (!td) |
735 | return ERR_PTR(-ENOMEM); |
736 | refcount_set(&td->count, 1); |
737 | |
738 | bdev_handle = bdev_open_by_dev(dev, mode, _dm_claim_ptr, NULL); |
739 | if (IS_ERR(bdev_handle)) { |
740 | r = PTR_ERR(bdev_handle); |
741 | goto out_free_td; |
742 | } |
743 | |
744 | /* |
745 | * We can be called before the dm disk is added. In that case we can't |
746 | * register the holder relation here. It will be done once add_disk was |
747 | * called. |
748 | */ |
749 | if (md->disk->slave_dir) { |
750 | r = bd_link_disk_holder(bdev_handle->bdev, md->disk); |
751 | if (r) |
752 | goto out_blkdev_put; |
753 | } |
754 | |
755 | td->dm_dev.mode = mode; |
756 | td->dm_dev.bdev = bdev_handle->bdev; |
757 | td->dm_dev.bdev_handle = bdev_handle; |
758 | td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, &part_off, |
759 | NULL, NULL); |
760 | format_dev_t(td->dm_dev.name, dev); |
761 | list_add(&td->list, &md->table_devices); |
762 | return td; |
763 | |
764 | out_blkdev_put: |
765 | bdev_release(bdev_handle); |
766 | out_free_td: |
767 | kfree(td); |
768 | return ERR_PTR(r); |
769 | } |
770 | |
771 | /* |
772 | * Close a table device that we've been using. |
773 | */ |
774 | static void close_table_device(struct table_device *td, struct mapped_device *md) |
775 | { |
776 | if (md->disk->slave_dir) |
777 | bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); |
778 | bdev_release(td->dm_dev.bdev_handle); |
779 | put_dax(td->dm_dev.dax_dev); |
780 | list_del(&td->list); |
781 | kfree(td); |
782 | } |
783 | |
784 | static struct table_device *find_table_device(struct list_head *l, dev_t dev, |
785 | blk_mode_t mode) |
786 | { |
787 | struct table_device *td; |
788 | |
789 | list_for_each_entry(td, l, list) |
790 | if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) |
791 | return td; |
792 | |
793 | return NULL; |
794 | } |
795 | |
796 | int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, |
797 | struct dm_dev **result) |
798 | { |
799 | struct table_device *td; |
800 | |
801 | mutex_lock(&md->table_devices_lock); |
802 | td = find_table_device(&md->table_devices, dev, mode); |
803 | if (!td) { |
804 | td = open_table_device(md, dev, mode); |
805 | if (IS_ERR(td)) { |
806 | mutex_unlock(&md->table_devices_lock); |
807 | return PTR_ERR(td); |
808 | } |
809 | } else { |
810 | refcount_inc(&td->count); |
811 | } |
812 | mutex_unlock(&md->table_devices_lock); |
813 | |
814 | *result = &td->dm_dev; |
815 | return 0; |
816 | } |
817 | |
818 | void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) |
819 | { |
820 | struct table_device *td = container_of(d, struct table_device, dm_dev); |
821 | |
822 | mutex_lock(&md->table_devices_lock); |
823 | if (refcount_dec_and_test(&td->count)) |
824 | close_table_device(td, md); |
825 | mutex_unlock(&md->table_devices_lock); |
826 | } |
827 | |
828 | /* |
829 | * Get the geometry associated with a dm device |
830 | */ |
831 | int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) |
832 | { |
833 | *geo = md->geometry; |
834 | |
835 | return 0; |
836 | } |
837 | |
838 | /* |
839 | * Set the geometry of a device. |
840 | */ |
841 | int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) |
842 | { |
843 | sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; |
844 | |
845 | if (geo->start > sz) { |
846 | DMERR("Start sector is beyond the geometry limits."); |
847 | return -EINVAL; |
848 | } |
849 | |
850 | md->geometry = *geo; |
851 | |
852 | return 0; |
853 | } |
854 | |
855 | static int __noflush_suspending(struct mapped_device *md) |
856 | { |
857 | return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); |
858 | } |
859 | |
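/*
 * Stage a dm_io for requeue: the first stage chains it on md->requeue_list,
 * the second stage puts the original bio back on the deferred list.
 */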
860 | static void dm_requeue_add_io(struct dm_io *io, bool first_stage) |
861 | { |
862 | struct mapped_device *md = io->md; |
863 | |
864 | if (first_stage) { |
865 | struct dm_io *next = md->requeue_list; |
866 | |
867 | md->requeue_list = io; |
868 | io->next = next; |
869 | } else { |
870 | bio_list_add_head(&md->deferred, io->orig_bio); |
871 | } |
872 | } |
873 | |
874 | static void dm_kick_requeue(struct mapped_device *md, bool first_stage) |
875 | { |
876 | if (first_stage) |
877 | queue_work(md->wq, &md->requeue_work); |
878 | else |
879 | queue_work(md->wq, &md->work); |
880 | } |
881 | |
882 | /* |
883 | * Return true if the dm_io's original bio is requeued. |
884 | * io->status is updated with error if requeue disallowed. |
885 | */ |
886 | static bool dm_handle_requeue(struct dm_io *io, bool first_stage) |
887 | { |
888 | struct bio *bio = io->orig_bio; |
889 | bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE); |
890 | bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) && |
891 | (bio->bi_opf & REQ_POLLED)); |
892 | struct mapped_device *md = io->md; |
893 | bool requeued = false; |
894 | |
895 | if (handle_requeue || handle_polled_eagain) { |
896 | unsigned long flags; |
897 | |
898 | if (bio->bi_opf & REQ_POLLED) { |
899 | /* |
900 | * Upper layer won't help us poll split bio |
901 | * (io->orig_bio may only reflect a subset of the |
902 | * pre-split original) so clear REQ_POLLED. |
903 | */ |
904 | bio_clear_polled(bio); |
905 | } |
906 | |
907 | /* |
908 | * Target requested pushing back the I/O or |
909 | * polled IO hit BLK_STS_AGAIN. |
910 | */ |
911 | spin_lock_irqsave(&md->deferred_lock, flags); |
912 | if ((__noflush_suspending(md) && |
913 | !WARN_ON_ONCE(dm_is_zone_write(md, bio))) || |
914 | handle_polled_eagain || first_stage) { |
915 | dm_requeue_add_io(io, first_stage); |
916 | requeued = true; |
917 | } else { |
918 | /* |
919 | * noflush suspend was interrupted or this is |
920 | * a write to a zoned target. |
921 | */ |
922 | io->status = BLK_STS_IOERR; |
923 | } |
924 | spin_unlock_irqrestore(&md->deferred_lock, flags); |
925 | } |
926 | |
927 | if (requeued) |
928 | dm_kick_requeue(md, first_stage); |
929 | |
930 | return requeued; |
931 | } |
932 | |
933 | static void __dm_io_complete(struct dm_io *io, bool first_stage) |
934 | { |
935 | struct bio *bio = io->orig_bio; |
936 | struct mapped_device *md = io->md; |
937 | blk_status_t io_error; |
938 | bool requeued; |
939 | |
940 | requeued = dm_handle_requeue(io, first_stage); |
941 | if (requeued && first_stage) |
942 | return; |
943 | |
944 | io_error = io->status; |
945 | if (dm_io_flagged(io, DM_IO_ACCOUNTED)) |
946 | dm_end_io_acct(io); |
947 | else if (!io_error) { |
948 | /* |
949 | * Must handle a target that returned DM_MAPIO_SUBMITTED |
950 | * but then called bio_endio() rather than dm_submit_bio_remap(). |
951 | */ |
952 | __dm_start_io_acct(io); |
953 | dm_end_io_acct(io); |
954 | } |
955 | free_io(io); |
956 | smp_wmb(); |
957 | this_cpu_dec(*md->pending_io); |
958 | |
959 | /* nudge anyone waiting on suspend queue */ |
960 | if (unlikely(wq_has_sleeper(&md->wait))) |
961 | wake_up(&md->wait); |
962 | |
963 | /* Return early if the original bio was requeued */ |
964 | if (requeued) |
965 | return; |
966 | |
967 | if (bio_is_flush_with_data(bio)) { |
968 | /* |
969 | * Preflush done for flush with data, reissue |
970 | * without REQ_PREFLUSH. |
971 | */ |
972 | bio->bi_opf &= ~REQ_PREFLUSH; |
973 | queue_io(md, bio); |
974 | } else { |
975 | /* done with normal IO or empty flush */ |
976 | if (io_error) |
977 | bio->bi_status = io_error; |
978 | bio_endio(bio); |
979 | } |
980 | } |
981 | |
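/*
 * First-stage requeue worker: rewind each dm_io on md->requeue_list and
 * run its second-stage completion.
 */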
982 | static void dm_wq_requeue_work(struct work_struct *work) |
983 | { |
984 | struct mapped_device *md = container_of(work, struct mapped_device, |
985 | requeue_work); |
986 | unsigned long flags; |
987 | struct dm_io *io; |
988 | |
989 | /* reuse deferred lock to simplify dm_handle_requeue */ |
990 | spin_lock_irqsave(&md->deferred_lock, flags); |
991 | io = md->requeue_list; |
992 | md->requeue_list = NULL; |
993 | spin_unlock_irqrestore(&md->deferred_lock, flags); |
994 | |
995 | while (io) { |
996 | struct dm_io *next = io->next; |
997 | |
998 | dm_io_rewind(io, &md->disk->bio_split); |
999 | |
1000 | io->next = NULL; |
1001 | __dm_io_complete(io, false); |
1002 | io = next; |
1003 | cond_resched(); |
1004 | } |
1005 | } |
1006 | |
1007 | /* |
1008 | * Two staged requeue: |
1009 | * |
1010 | * 1) io->orig_bio points to the real original bio, and the part mapped to |
1011 | * this io must be requeued, instead of other parts of the original bio. |
1012 | * |
1013 | * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io. |
1014 | */ |
1015 | static void dm_io_complete(struct dm_io *io) |
1016 | { |
1017 | bool first_requeue; |
1018 | |
1019 | /* |
1020 | * Only dm_io that has been split needs two stage requeue, otherwise |
1021 | * we may run into long bio clone chain during suspend and OOM could |
1022 | * be triggered. |
1023 | * |
1024 | * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they |
1025 | * also aren't handled via the first stage requeue. |
1026 | */ |
1027 | if (dm_io_flagged(io, DM_IO_WAS_SPLIT)) |
1028 | first_requeue = true; |
1029 | else |
1030 | first_requeue = false; |
1031 | |
1032 | __dm_io_complete(io, first_requeue); |
1033 | } |
1034 | |
1035 | /* |
1036 | * Decrements the number of outstanding ios that a bio has been |
1037 | * cloned into, completing the original io if necessary. |
1038 | */ |
1039 | static inline void __dm_io_dec_pending(struct dm_io *io) |
1040 | { |
1041 | if (atomic_dec_and_test(&io->io_count)) |
1042 | dm_io_complete(io); |
1043 | } |
1044 | |
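/* Record an error in the dm_io unless a noflush-suspend requeue takes precedence. */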
1045 | static void dm_io_set_error(struct dm_io *io, blk_status_t error) |
1046 | { |
1047 | unsigned long flags; |
1048 | |
1049 | /* Push-back supersedes any I/O errors */ |
1050 | spin_lock_irqsave(&io->lock, flags); |
1051 | if (!(io->status == BLK_STS_DM_REQUEUE && |
1052 | __noflush_suspending(io->md))) { |
1053 | io->status = error; |
1054 | } |
1055 | spin_unlock_irqrestore(&io->lock, flags); |
1056 | } |
1057 | |
1058 | static void dm_io_dec_pending(struct dm_io *io, blk_status_t error) |
1059 | { |
1060 | if (unlikely(error)) |
1061 | dm_io_set_error(io, error); |
1062 | |
1063 | __dm_io_dec_pending(io); |
1064 | } |
1065 | |
1066 | /* |
1067 | * The queue_limits are only valid as long as you have a reference |
1068 | * count on 'md'. Verification is deliberately _not_ imposed, to avoid atomic_read(). |
1069 | */ |
1070 | static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md) |
1071 | { |
1072 | return &md->queue->limits; |
1073 | } |
1074 | |
1075 | void disable_discard(struct mapped_device *md) |
1076 | { |
1077 | struct queue_limits *limits = dm_get_queue_limits(md); |
1078 | |
1079 | /* device doesn't really support DISCARD, disable it */ |
1080 | limits->max_discard_sectors = 0; |
1081 | } |
1082 | |
1083 | void disable_write_zeroes(struct mapped_device *md) |
1084 | { |
1085 | struct queue_limits *limits = dm_get_queue_limits(md); |
1086 | |
1087 | /* device doesn't really support WRITE ZEROES, disable it */ |
1088 | limits->max_write_zeroes_sectors = 0; |
1089 | } |
1090 | |
1091 | static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) |
1092 | { |
1093 | return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); |
1094 | } |
1095 | |
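/* Completion handler for clone bios that were mapped to targets. */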
1096 | static void clone_endio(struct bio *bio) |
1097 | { |
1098 | blk_status_t error = bio->bi_status; |
1099 | struct dm_target_io *tio = clone_to_tio(bio); |
1100 | struct dm_target *ti = tio->ti; |
1101 | dm_endio_fn endio = ti->type->end_io; |
1102 | struct dm_io *io = tio->io; |
1103 | struct mapped_device *md = io->md; |
1104 | |
1105 | if (unlikely(error == BLK_STS_TARGET)) { |
1106 | if (bio_op(bio) == REQ_OP_DISCARD && |
1107 | !bdev_max_discard_sectors(bio->bi_bdev)) |
1108 | disable_discard(md); |
1109 | else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && |
1110 | !bdev_write_zeroes_sectors(bio->bi_bdev)) |
1111 | disable_write_zeroes(md); |
1112 | } |
1113 | |
1114 | if (static_branch_unlikely(&zoned_enabled) && |
1115 | unlikely(bdev_is_zoned(bio->bi_bdev))) |
1116 | dm_zone_endio(io, bio); |
1117 | |
1118 | if (endio) { |
1119 | int r = endio(ti, bio, &error); |
1120 | |
1121 | switch (r) { |
1122 | case DM_ENDIO_REQUEUE: |
1123 | if (static_branch_unlikely(&zoned_enabled)) { |
1124 | /* |
1125 | * Requeuing writes to a sequential zone of a zoned |
1126 | * target will break the sequential write pattern: |
1127 | * fail such IO. |
1128 | */ |
1129 | if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) |
1130 | error = BLK_STS_IOERR; |
1131 | else |
1132 | error = BLK_STS_DM_REQUEUE; |
1133 | } else |
1134 | error = BLK_STS_DM_REQUEUE; |
1135 | fallthrough; |
1136 | case DM_ENDIO_DONE: |
1137 | break; |
1138 | case DM_ENDIO_INCOMPLETE: |
1139 | /* The target will handle the io */ |
1140 | return; |
1141 | default: |
1142 | DMCRIT("unimplemented target endio return value: %d", r); |
1143 | BUG(); |
1144 | } |
1145 | } |
1146 | |
1147 | if (static_branch_unlikely(&swap_bios_enabled) && |
1148 | unlikely(swap_bios_limit(ti, bio))) |
1149 | up(&md->swap_bios_semaphore); |
1150 | |
1151 | free_tio(bio); |
1152 | dm_io_dec_pending(io, error); |
1153 | } |
1154 | |
1155 | /* |
1156 | * Return maximum size of I/O possible at the supplied sector up to the current |
1157 | * target boundary. |
1158 | */ |
1159 | static inline sector_t max_io_len_target_boundary(struct dm_target *ti, |
1160 | sector_t target_offset) |
1161 | { |
1162 | return ti->len - target_offset; |
1163 | } |
1164 | |
1165 | static sector_t __max_io_len(struct dm_target *ti, sector_t sector, |
1166 | unsigned int max_granularity, |
1167 | unsigned int max_sectors) |
1168 | { |
1169 | sector_t target_offset = dm_target_offset(ti, sector); |
1170 | sector_t len = max_io_len_target_boundary(ti, target_offset); |
1171 | |
1172 | /* |
1173 | * Does the target need to split IO even further? |
1174 | * - varied (per target) IO splitting is a tenet of DM; this |
1175 | * explains why stacked chunk_sectors based splitting via |
1176 | * bio_split_to_limits() isn't possible here. |
1177 | */ |
1178 | if (!max_granularity) |
1179 | return len; |
1180 | return min_t(sector_t, len, |
1181 | min(max_sectors ? : queue_max_sectors(ti->table->md->queue), |
1182 | blk_chunk_sectors_left(target_offset, max_granularity))); |
1183 | } |
1184 | |
1185 | static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) |
1186 | { |
1187 | return __max_io_len(ti, sector, ti->max_io_len, 0); |
1188 | } |
1189 | |
1190 | int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) |
1191 | { |
1192 | if (len > UINT_MAX) { |
1193 | DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", |
1194 | (unsigned long long)len, UINT_MAX); |
1195 | ti->error = "Maximum size of target IO is too large"; |
1196 | return -EINVAL; |
1197 | } |
1198 | |
1199 | ti->max_io_len = (uint32_t) len; |
1200 | |
1201 | return 0; |
1202 | } |
1203 | EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); |
1204 | |
1205 | static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, |
1206 | sector_t sector, int *srcu_idx) |
1207 | __acquires(md->io_barrier) |
1208 | { |
1209 | struct dm_table *map; |
1210 | struct dm_target *ti; |
1211 | |
1212 | map = dm_get_live_table(md, srcu_idx); |
1213 | if (!map) |
1214 | return NULL; |
1215 | |
1216 | ti = dm_table_find_target(map, sector); |
1217 | if (!ti) |
1218 | return NULL; |
1219 | |
1220 | return ti; |
1221 | } |
1222 | |
1223 | static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, |
1224 | long nr_pages, enum dax_access_mode mode, void **kaddr, |
1225 | pfn_t *pfn) |
1226 | { |
1227 | struct mapped_device *md = dax_get_private(dax_dev); |
1228 | sector_t sector = pgoff * PAGE_SECTORS; |
1229 | struct dm_target *ti; |
1230 | long len, ret = -EIO; |
1231 | int srcu_idx; |
1232 | |
1233 | ti = dm_dax_get_live_target(md, sector, &srcu_idx); |
1234 | |
1235 | if (!ti) |
1236 | goto out; |
1237 | if (!ti->type->direct_access) |
1238 | goto out; |
1239 | len = max_io_len(ti, sector) / PAGE_SECTORS; |
1240 | if (len < 1) |
1241 | goto out; |
1242 | nr_pages = min(len, nr_pages); |
1243 | ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn); |
1244 | |
1245 | out: |
1246 | dm_put_live_table(md, srcu_idx); |
1247 | |
1248 | return ret; |
1249 | } |
1250 | |
1251 | static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, |
1252 | size_t nr_pages) |
1253 | { |
1254 | struct mapped_device *md = dax_get_private(dax_dev); |
1255 | sector_t sector = pgoff * PAGE_SECTORS; |
1256 | struct dm_target *ti; |
1257 | int ret = -EIO; |
1258 | int srcu_idx; |
1259 | |
1260 | ti = dm_dax_get_live_target(md, sector, &srcu_idx); |
1261 | |
1262 | if (!ti) |
1263 | goto out; |
1264 | if (WARN_ON(!ti->type->dax_zero_page_range)) { |
1265 | /* |
1266 | * ->zero_page_range() is mandatory dax operation. If we are |
1267 | * here, something is wrong. |
1268 | */ |
1269 | goto out; |
1270 | } |
1271 | ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); |
1272 | out: |
1273 | dm_put_live_table(md, srcu_idx); |
1274 | |
1275 | return ret; |
1276 | } |
1277 | |
1278 | static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, |
1279 | void *addr, size_t bytes, struct iov_iter *i) |
1280 | { |
1281 | struct mapped_device *md = dax_get_private(dax_dev); |
1282 | sector_t sector = pgoff * PAGE_SECTORS; |
1283 | struct dm_target *ti; |
1284 | int srcu_idx; |
1285 | long ret = 0; |
1286 | |
1287 | ti = dm_dax_get_live_target(md, sector, &srcu_idx); |
1288 | if (!ti || !ti->type->dax_recovery_write) |
1289 | goto out; |
1290 | |
1291 | ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i); |
1292 | out: |
1293 | dm_put_live_table(md, srcu_idx); |
1294 | return ret; |
1295 | } |
1296 | |
1297 | /* |
1298 | * A target may call dm_accept_partial_bio only from the map routine. It is |
1299 | * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management |
1300 | * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by |
1301 | * __send_duplicate_bios(). |
1302 | * |
1303 | * dm_accept_partial_bio informs the dm that the target only wants to process |
1304 | * additional n_sectors sectors of the bio and the rest of the data should be |
1305 | * sent in a next bio. |
1306 | * |
1307 | * A diagram that explains the arithmetics: |
1308 | * +--------------------+---------------+-------+ |
1309 | * | 1 | 2 | 3 | |
1310 | * +--------------------+---------------+-------+ |
1311 | * |
1312 | * <-------------- *tio->len_ptr ---------------> |
1313 | * <----- bio_sectors -----> |
1314 | * <-- n_sectors --> |
1315 | * |
1316 | * Region 1 was already iterated over with bio_advance or similar function. |
1317 | * (it may be empty if the target doesn't use bio_advance) |
1318 | * Region 2 is the remaining bio size that the target wants to process. |
1319 | * (it may be empty if region 1 is non-empty, although there is no reason |
1320 | * to make it empty) |
1321 | * The target requires that region 3 is to be sent in the next bio. |
1322 | * |
1323 | * If the target wants to receive multiple copies of the bio (via num_*bios, etc), |
1324 | * the partially processed part (the sum of regions 1+2) must be the same for all |
1325 | * copies of the bio. |
1326 | */ |
1327 | void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors) |
1328 | { |
1329 | struct dm_target_io *tio = clone_to_tio(bio); |
1330 | struct dm_io *io = tio->io; |
1331 | unsigned int bio_sectors = bio_sectors(bio); |
1332 | |
1333 | BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); |
1334 | BUG_ON(op_is_zone_mgmt(bio_op(bio))); |
1335 | BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND); |
1336 | BUG_ON(bio_sectors > *tio->len_ptr); |
1337 | BUG_ON(n_sectors > bio_sectors); |
1338 | |
1339 | *tio->len_ptr -= bio_sectors - n_sectors; |
1340 | bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; |
1341 | |
1342 | /* |
1343 | * __split_and_process_bio() may have already saved mapped part |
1344 | * for accounting but it is being reduced so update accordingly. |
1345 | */ |
1346 | dm_io_set_flag(io, DM_IO_WAS_SPLIT); |
1347 | io->sectors = n_sectors; |
1348 | io->sector_offset = bio_sectors(io->orig_bio); |
1349 | } |
1350 | EXPORT_SYMBOL_GPL(dm_accept_partial_bio); |
1351 | |
1352 | /* |
1353 | * @clone: clone bio that DM core passed to target's .map function |
1354 | * @tgt_clone: clone of @clone bio that target needs submitted |
1355 | * |
1356 | * Targets should use this interface to submit bios they take |
1357 | * ownership of when returning DM_MAPIO_SUBMITTED. |
1358 | * |
1359 | * Target should also enable ti->accounts_remapped_io |
1360 | */ |
1361 | void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) |
1362 | { |
1363 | struct dm_target_io *tio = clone_to_tio(clone); |
1364 | struct dm_io *io = tio->io; |
1365 | |
1366 | /* establish bio that will get submitted */ |
1367 | if (!tgt_clone) |
1368 | tgt_clone = clone; |
1369 | |
1370 | /* |
1371 | * Account io->orig_bio to DM dev on behalf of target |
1372 | * that took ownership of IO with DM_MAPIO_SUBMITTED. |
1373 | */ |
1374 | dm_start_io_acct(io, clone); |
1375 | |
1376 | trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk), |
1377 | tio->old_sector); |
1378 | submit_bio_noacct(tgt_clone); |
1379 | } |
1380 | EXPORT_SYMBOL_GPL(dm_submit_bio_remap); |
1381 | |
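/* Resize the swap_bios semaphore so it matches the new swap_bios limit. */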
1382 | static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) |
1383 | { |
1384 | mutex_lock(&md->swap_bios_lock); |
1385 | while (latch < md->swap_bios) { |
1386 | cond_resched(); |
1387 | down(&md->swap_bios_semaphore); |
1388 | md->swap_bios--; |
1389 | } |
1390 | while (latch > md->swap_bios) { |
1391 | cond_resched(); |
1392 | up(&md->swap_bios_semaphore); |
1393 | md->swap_bios++; |
1394 | } |
1395 | mutex_unlock(&md->swap_bios_lock); |
1396 | } |
1397 | |
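/* Hand a clone bio to its target's map function and act on the result. */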
1398 | static void __map_bio(struct bio *clone) |
1399 | { |
1400 | struct dm_target_io *tio = clone_to_tio(clone); |
1401 | struct dm_target *ti = tio->ti; |
1402 | struct dm_io *io = tio->io; |
1403 | struct mapped_device *md = io->md; |
1404 | int r; |
1405 | |
1406 | clone->bi_end_io = clone_endio; |
1407 | |
1408 | /* |
1409 | * Map the clone. |
1410 | */ |
1411 | tio->old_sector = clone->bi_iter.bi_sector; |
1412 | |
1413 | if (static_branch_unlikely(&swap_bios_enabled) && |
1414 | unlikely(swap_bios_limit(ti, clone))) { |
1415 | int latch = get_swap_bios(); |
1416 | |
1417 | if (unlikely(latch != md->swap_bios)) |
1418 | __set_swap_bios_limit(md, latch); |
1419 | down(&md->swap_bios_semaphore); |
1420 | } |
1421 | |
1422 | if (static_branch_unlikely(&zoned_enabled)) { |
1423 | /* |
1424 | * Check if the IO needs a special mapping due to zone append |
1425 | * emulation on zoned target. In this case, dm_zone_map_bio() |
1426 | * calls the target map operation. |
1427 | */ |
1428 | if (unlikely(dm_emulate_zone_append(md))) |
1429 | r = dm_zone_map_bio(tio); |
1430 | else |
1431 | goto do_map; |
1432 | } else { |
1433 | do_map: |
1434 | if (likely(ti->type->map == linear_map)) |
1435 | r = linear_map(ti, clone); |
1436 | else if (ti->type->map == stripe_map) |
1437 | r = stripe_map(ti, clone); |
1438 | else |
1439 | r = ti->type->map(ti, clone); |
1440 | } |
1441 | |
1442 | switch (r) { |
1443 | case DM_MAPIO_SUBMITTED: |
1444 | /* target has assumed ownership of this io */ |
1445 | if (!ti->accounts_remapped_io) |
1446 | dm_start_io_acct(io, clone); |
1447 | break; |
1448 | case DM_MAPIO_REMAPPED: |
1449 | dm_submit_bio_remap(clone, NULL); |
1450 | break; |
1451 | case DM_MAPIO_KILL: |
1452 | case DM_MAPIO_REQUEUE: |
1453 | if (static_branch_unlikely(&swap_bios_enabled) && |
1454 | unlikely(swap_bios_limit(ti, clone))) |
1455 | up(&md->swap_bios_semaphore); |
1456 | free_tio(clone); |
1457 | if (r == DM_MAPIO_KILL) |
1458 | dm_io_dec_pending(io, BLK_STS_IOERR); |
1459 | else |
1460 | dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); |
1461 | break; |
1462 | default: |
1463 | DMCRIT("unimplemented target map return value: %d", r); |
1464 | BUG(); |
1465 | } |
1466 | } |
1467 | |
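/* If the original bio must be split, record the mapped length for accounting. */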
1468 | static void setup_split_accounting(struct clone_info *ci, unsigned int len) |
1469 | { |
1470 | struct dm_io *io = ci->io; |
1471 | |
1472 | if (ci->sector_count > len) { |
1473 | /* |
1474 | * Split needed, save the mapped part for accounting. |
1475 | * NOTE: dm_accept_partial_bio() will update accordingly. |
1476 | */ |
1477 | dm_io_set_flag(io, DM_IO_WAS_SPLIT); |
1478 | io->sectors = len; |
1479 | io->sector_offset = bio_sectors(ci->bio); |
1480 | } |
1481 | } |
1482 | |
1483 | static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, |
1484 | struct dm_target *ti, unsigned int num_bios, |
1485 | unsigned *len, gfp_t gfp_flag) |
1486 | { |
1487 | struct bio *bio; |
1488 | int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1; |
1489 | |
1490 | for (; try < 2; try++) { |
1491 | int bio_nr; |
1492 | |
1493 | if (try && num_bios > 1) |
1494 | mutex_lock(&ci->io->md->table_devices_lock); |
1495 | for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { |
1496 | bio = alloc_tio(ci, ti, bio_nr, len, |
1497 | try ? GFP_NOIO : GFP_NOWAIT); |
1498 | if (!bio) |
1499 | break; |
1500 | |
1501 | bio_list_add(blist, bio); |
1502 | } |
1503 | if (try && num_bios > 1) |
1504 | mutex_unlock(&ci->io->md->table_devices_lock); |
1505 | if (bio_nr == num_bios) |
1506 | return; |
1507 | |
1508 | while ((bio = bio_list_pop(blist))) |
1509 | free_tio(bio); |
1510 | } |
1511 | } |
1512 | |
1513 | static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, |
1514 | unsigned int num_bios, unsigned int *len, |
1515 | gfp_t gfp_flag) |
1516 | { |
1517 | struct bio_list blist = BIO_EMPTY_LIST; |
1518 | struct bio *clone; |
1519 | unsigned int ret = 0; |
1520 | |
1521 | if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */ |
1522 | return 0; |
1523 | |
1524 | /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ |
1525 | if (len) |
1526 | setup_split_accounting(ci, *len); |
1527 | |
1528 | /* |
1529 | * Using alloc_multiple_bios(), even if num_bios is 1, to consistently |
1530 | * support allocating using GFP_NOWAIT with GFP_NOIO fallback. |
1531 | */ |
1532 | alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag); |
1533 | while ((clone = bio_list_pop(&blist))) { |
1534 | if (num_bios > 1) |
1535 | dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); |
1536 | __map_bio(clone); |
1537 | ret += 1; |
1538 | } |
1539 | |
1540 | return ret; |
1541 | } |
1542 | |
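/* Send an empty flush clone to every target that requested flush bios. */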
1543 | static void __send_empty_flush(struct clone_info *ci) |
1544 | { |
1545 | struct dm_table *t = ci->map; |
1546 | struct bio flush_bio; |
1547 | |
1548 | /* |
1549 | * Use an on-stack bio for this, it's safe since we don't |
1550 | * need to reference it after submit. It's just used as |
1551 | * the basis for the clone(s). |
1552 | */ |
1553 | bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, |
1554 | REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); |
1555 | |
1556 | ci->bio = &flush_bio; |
1557 | ci->sector_count = 0; |
1558 | ci->io->tio.clone.bi_iter.bi_size = 0; |
1559 | |
1560 | for (unsigned int i = 0; i < t->num_targets; i++) { |
1561 | unsigned int bios; |
1562 | struct dm_target *ti = dm_table_get_target(t, i); |
1563 | |
1564 | if (unlikely(ti->num_flush_bios == 0)) |
1565 | continue; |
1566 | |
1567 | atomic_add(ti->num_flush_bios, &ci->io->io_count); |
1568 | bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, |
1569 | NULL, GFP_NOWAIT); |
1570 | atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count); |
1571 | } |
1572 | |
1573 | /* |
1574 | * alloc_io() takes one extra reference for submission, so the |
1575 | * reference won't reach 0 without the following subtraction |
1576 | */ |
1577 | atomic_sub(1, &ci->io->io_count); |
1578 | |
1579 | bio_uninit(ci->bio); |
1580 | } |
1581 | |
1582 | static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti, |
1583 | unsigned int num_bios, unsigned int max_granularity, |
1584 | unsigned int max_sectors) |
1585 | { |
1586 | unsigned int len, bios; |
1587 | |
1588 | len = min_t(sector_t, ci->sector_count, |
1589 | __max_io_len(ti, ci->sector, max_granularity, max_sectors)); |
1590 | |
1591 | atomic_add(num_bios, &ci->io->io_count); |
1592 | bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO); |
1593 | /* |
1594 | * alloc_io() takes one extra reference for submission, so the |
1595 | * reference won't reach 0 without the following (+1) subtraction |
1596 | */ |
1597 | atomic_sub(num_bios - bios + 1, &ci->io->io_count); |
1598 | |
1599 | ci->sector += len; |
1600 | ci->sector_count -= len; |
1601 | } |
1602 | |
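/* Return true for ops (discard, secure erase, write zeroes) that take the abnormal IO path. */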
1603 | static bool is_abnormal_io(struct bio *bio) |
1604 | { |
1605 | enum req_op op = bio_op(bio); |
1606 | |
1607 | if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) { |
1608 | switch (op) { |
1609 | case REQ_OP_DISCARD: |
1610 | case REQ_OP_SECURE_ERASE: |
1611 | case REQ_OP_WRITE_ZEROES: |
1612 | return true; |
1613 | default: |
1614 | break; |
1615 | } |
1616 | } |
1617 | |
1618 | return false; |
1619 | } |
1620 | |
1621 | static blk_status_t __process_abnormal_io(struct clone_info *ci, |
1622 | struct dm_target *ti) |
1623 | { |
1624 | unsigned int num_bios = 0; |
1625 | unsigned int max_granularity = 0; |
1626 | unsigned int max_sectors = 0; |
1627 | struct queue_limits *limits = dm_get_queue_limits(ti->table->md); |
1628 | |
1629 | switch (bio_op(ci->bio)) { |
1630 | case REQ_OP_DISCARD: |
1631 | num_bios = ti->num_discard_bios; |
1632 | max_sectors = limits->max_discard_sectors; |
1633 | if (ti->max_discard_granularity) |
1634 | max_granularity = max_sectors; |
1635 | break; |
1636 | case REQ_OP_SECURE_ERASE: |
1637 | num_bios = ti->num_secure_erase_bios; |
1638 | max_sectors = limits->max_secure_erase_sectors; |
1639 | if (ti->max_secure_erase_granularity) |
1640 | max_granularity = max_sectors; |
1641 | break; |
1642 | case REQ_OP_WRITE_ZEROES: |
1643 | num_bios = ti->num_write_zeroes_bios; |
1644 | max_sectors = limits->max_write_zeroes_sectors; |
1645 | if (ti->max_write_zeroes_granularity) |
1646 | max_granularity = max_sectors; |
1647 | break; |
1648 | default: |
1649 | break; |
1650 | } |
1651 | |
1652 | /* |
1653 | * Even though the device advertised support for this type of |
1654 | * request, that does not mean every target supports it, and |
1655 | * reconfiguration might also have changed that since the |
1656 | * check was performed. |
1657 | */ |
1658 | if (unlikely(!num_bios)) |
1659 | return BLK_STS_NOTSUPP; |
1660 | |
1661 | __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors); |
1662 | |
1663 | return BLK_STS_OK; |
1664 | } |
1665 | |
1666 | /* |
1667 | * Reuse ->bi_private as dm_io list head for storing all dm_io instances |
1668 | * associated with this bio, and this bio's bi_private needs to be |
1669 | * stored in dm_io->data before the reuse. |
1670 | * |
1671 | * bio->bi_private is owned by fs or upper layer, so block layer won't |
1672 | * touch it after splitting. Meantime it won't be changed by anyone after |
1673 | * bio is submitted. So this reuse is safe. |
1674 | */ |
1675 | static inline struct dm_io **dm_poll_list_head(struct bio *bio) |
1676 | { |
1677 | return (struct dm_io **)&bio->bi_private; |
1678 | } |
1679 | |
1680 | static void dm_queue_poll_io(struct bio *bio, struct dm_io *io) |
1681 | { |
1682 | struct dm_io **head = dm_poll_list_head(bio); |
1683 | |
1684 | if (!(bio->bi_opf & REQ_DM_POLL_LIST)) { |
1685 | bio->bi_opf |= REQ_DM_POLL_LIST; |
1686 | /* |
1687 | * Save .bi_private into dm_io, so that we can reuse |
1688 | * .bi_private as dm_io list head for storing dm_io list |
1689 | */ |
1690 | io->data = bio->bi_private; |
1691 | |
1692 | /* tell block layer to poll for completion */ |
1693 | bio->bi_cookie = ~BLK_QC_T_NONE; |
1694 | |
1695 | io->next = NULL; |
1696 | } else { |
1697 | /* |
1698 | * bio recursed due to split, reuse original poll list, |
1699 | * and save bio->bi_private too. |
1700 | */ |
1701 | io->data = (*head)->data; |
1702 | io->next = *head; |
1703 | } |
1704 | |
1705 | *head = io; |
1706 | } |
1707 | |
1708 | /* |
1709 | * Select the correct strategy for processing a non-flush bio. |
1710 | */ |
1711 | static blk_status_t __split_and_process_bio(struct clone_info *ci) |
1712 | { |
1713 | struct bio *clone; |
1714 | struct dm_target *ti; |
1715 | unsigned int len; |
1716 | |
1717 | ti = dm_table_find_target(ci->map, ci->sector); |
1718 | if (unlikely(!ti)) |
1719 | return BLK_STS_IOERR; |
1720 | |
1721 | if (unlikely(ci->is_abnormal_io)) |
1722 | return __process_abnormal_io(ci, ti); |
1723 | |
1724 | /* |
1725 | * Only support bio polling for normal IO, and the target io is |
1726 | * exactly inside the dm_io instance (verified in dm_poll_dm_io) |
1727 | */ |
1728 | ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); |
1729 | |
1730 | len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); |
1731 | setup_split_accounting(ci, len); |
1732 | |
1733 | if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) { |
1734 | if (unlikely(!dm_target_supports_nowait(ti->type))) |
1735 | return BLK_STS_NOTSUPP; |
1736 | |
1737 | clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); |
1738 | if (unlikely(!clone)) |
1739 | return BLK_STS_AGAIN; |
1740 | } else { |
1741 | clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); |
1742 | } |
1743 | __map_bio(clone); |
1744 | |
1745 | ci->sector += len; |
1746 | ci->sector_count -= len; |
1747 | |
1748 | return BLK_STS_OK; |
1749 | } |
1750 | |
1751 | static void init_clone_info(struct clone_info *ci, struct dm_io *io, |
1752 | struct dm_table *map, struct bio *bio, bool is_abnormal) |
1753 | { |
1754 | ci->map = map; |
1755 | ci->io = io; |
1756 | ci->bio = bio; |
1757 | ci->is_abnormal_io = is_abnormal; |
1758 | ci->submit_as_polled = false; |
1759 | ci->sector = bio->bi_iter.bi_sector; |
1760 | ci->sector_count = bio_sectors(bio); |
1761 | |
1762 | /* Shouldn't happen but sector_count was being set to 0 so... */ |
1763 | if (static_branch_unlikely(&zoned_enabled) && |
1764 | WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count)) |
1765 | ci->sector_count = 0; |
1766 | } |
1767 | |
1768 | /* |
1769 | * Entry point to split a bio into clones and submit them to the targets. |
1770 | */ |
1771 | static void dm_split_and_process_bio(struct mapped_device *md, |
1772 | struct dm_table *map, struct bio *bio) |
1773 | { |
1774 | struct clone_info ci; |
1775 | struct dm_io *io; |
1776 | blk_status_t error = BLK_STS_OK; |
1777 | bool is_abnormal; |
1778 | |
1779 | is_abnormal = is_abnormal_io(bio); |
1780 | if (unlikely(is_abnormal)) { |
1781 | /* |
1782 | * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc) |
1783 | * otherwise associated queue_limits won't be imposed. |
1784 | */ |
1785 | bio = bio_split_to_limits(bio); |
1786 | if (!bio) |
1787 | return; |
1788 | } |
1789 | |
1790 | /* Only support nowait for normal IO */ |
1791 | if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) { |
1792 | io = alloc_io(md, bio, GFP_NOWAIT); |
1793 | if (unlikely(!io)) { |
1794 | /* Unable to do anything without dm_io. */ |
1795 | bio_wouldblock_error(bio); |
1796 | return; |
1797 | } |
1798 | } else { |
1799 | io = alloc_io(md, bio, GFP_NOIO); |
1800 | } |
	init_clone_info(&ci, io, map, bio, is_abnormal);
1802 | |
1803 | if (bio->bi_opf & REQ_PREFLUSH) { |
		__send_empty_flush(&ci);
1805 | /* dm_io_complete submits any data associated with flush */ |
1806 | goto out; |
1807 | } |
1808 | |
	error = __split_and_process_bio(&ci);
1810 | if (error || !ci.sector_count) |
1811 | goto out; |
1812 | /* |
1813 | * Remainder must be passed to submit_bio_noacct() so it gets handled |
1814 | * *after* bios already submitted have been completely processed. |
1815 | */ |
	bio_trim(bio, io->sectors, ci.sector_count);
	trace_block_split(bio, bio->bi_iter.bi_sector);
1818 | bio_inc_remaining(bio); |
1819 | submit_bio_noacct(bio); |
1820 | out: |
1821 | /* |
1822 | * Drop the extra reference count for non-POLLED bio, and hold one |
1823 | * reference for POLLED bio, which will be released in dm_poll_bio |
1824 | * |
1825 | * Add every dm_io instance into the dm_io list head which is stored |
1826 | * in bio->bi_private, so that dm_poll_bio can poll them all. |
1827 | */ |
1828 | if (error || !ci.submit_as_polled) { |
1829 | /* |
1830 | * In case of submission failure, the extra reference for |
1831 | * submitting io isn't consumed yet |
1832 | */ |
1833 | if (error) |
			atomic_dec(&io->io_count);
1835 | dm_io_dec_pending(io, error); |
1836 | } else |
1837 | dm_queue_poll_io(bio, io); |
1838 | } |
1839 | |
1840 | static void dm_submit_bio(struct bio *bio) |
1841 | { |
1842 | struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; |
1843 | int srcu_idx; |
1844 | struct dm_table *map; |
1845 | |
	map = dm_get_live_table(md, &srcu_idx);
1847 | |
1848 | /* If suspended, or map not yet available, queue this IO for later */ |
1849 | if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || |
1850 | unlikely(!map)) { |
1851 | if (bio->bi_opf & REQ_NOWAIT) |
1852 | bio_wouldblock_error(bio); |
1853 | else if (bio->bi_opf & REQ_RAHEAD) |
1854 | bio_io_error(bio); |
1855 | else |
1856 | queue_io(md, bio); |
1857 | goto out; |
1858 | } |
1859 | |
1860 | dm_split_and_process_bio(md, map, bio); |
1861 | out: |
1862 | dm_put_live_table(md, srcu_idx); |
1863 | } |
1864 | |
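/*
 * Poll the clone bio of one dm_io; returns true once only the original
 * submitter's reference on the dm_io remains, i.e. the mapped I/O is done.
 */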
1865 | static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, |
1866 | unsigned int flags) |
1867 | { |
1868 | WARN_ON_ONCE(!dm_tio_is_normal(&io->tio)); |
1869 | |
1870 | /* don't poll if the mapped io is done */ |
	if (atomic_read(&io->io_count) > 1)
		bio_poll(&io->tio.clone, iob, flags);

	/* bio_poll holds the last reference */
	return atomic_read(&io->io_count) == 1;
1876 | } |
1877 | |
1878 | static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob, |
1879 | unsigned int flags) |
1880 | { |
1881 | struct dm_io **head = dm_poll_list_head(bio); |
1882 | struct dm_io *list = *head; |
1883 | struct dm_io *tmp = NULL; |
1884 | struct dm_io *curr, *next; |
1885 | |
1886 | /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */ |
1887 | if (!(bio->bi_opf & REQ_DM_POLL_LIST)) |
1888 | return 0; |
1889 | |
1890 | WARN_ON_ONCE(!list); |
1891 | |
1892 | /* |
1893 | * Restore .bi_private before possibly completing dm_io. |
1894 | * |
1895 | * bio_poll() is only possible once @bio has been completely |
1896 | * submitted via submit_bio_noacct()'s depth-first submission. |
1897 | * So there is no dm_queue_poll_io() race associated with |
1898 | * clearing REQ_DM_POLL_LIST here. |
1899 | */ |
1900 | bio->bi_opf &= ~REQ_DM_POLL_LIST; |
1901 | bio->bi_private = list->data; |
1902 | |
1903 | for (curr = list, next = curr->next; curr; curr = next, next = |
1904 | curr ? curr->next : NULL) { |
		if (dm_poll_dm_io(curr, iob, flags)) {
1906 | /* |
1907 | * clone_endio() has already occurred, so no |
1908 | * error handling is needed here. |
1909 | */ |
			__dm_io_dec_pending(curr);
1911 | } else { |
1912 | curr->next = tmp; |
1913 | tmp = curr; |
1914 | } |
1915 | } |
1916 | |
1917 | /* Not done? */ |
1918 | if (tmp) { |
1919 | bio->bi_opf |= REQ_DM_POLL_LIST; |
1920 | /* Reset bio->bi_private to dm_io list head */ |
1921 | *head = tmp; |
1922 | return 0; |
1923 | } |
1924 | return 1; |
1925 | } |
1926 | |
1927 | /* |
1928 | *--------------------------------------------------------------- |
1929 | * An IDR is used to keep track of allocated minor numbers. |
1930 | *--------------------------------------------------------------- |
1931 | */ |
1932 | static void free_minor(int minor) |
1933 | { |
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
1937 | } |
1938 | |
1939 | /* |
1940 | * See if the device with a specific minor # is free. |
1941 | */ |
1942 | static int specific_minor(int minor) |
1943 | { |
1944 | int r; |
1945 | |
1946 | if (minor >= (1 << MINORBITS)) |
1947 | return -EINVAL; |
1948 | |
1949 | idr_preload(GFP_KERNEL); |
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
1955 | idr_preload_end(); |
1956 | if (r < 0) |
1957 | return r == -ENOSPC ? -EBUSY : r; |
1958 | return 0; |
1959 | } |
1960 | |
1961 | static int next_free_minor(int *minor) |
1962 | { |
1963 | int r; |
1964 | |
1965 | idr_preload(GFP_KERNEL); |
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
1971 | idr_preload_end(); |
1972 | if (r < 0) |
1973 | return r; |
1974 | *minor = r; |
1975 | return 0; |
1976 | } |
1977 | |
1978 | static const struct block_device_operations dm_blk_dops; |
1979 | static const struct block_device_operations dm_rq_blk_dops; |
1980 | static const struct dax_operations dm_dax_ops; |
1981 | |
1982 | static void dm_wq_work(struct work_struct *work); |
1983 | |
1984 | #ifdef CONFIG_BLK_INLINE_ENCRYPTION |
1985 | static void dm_queue_destroy_crypto_profile(struct request_queue *q) |
1986 | { |
	dm_destroy_crypto_profile(q->crypto_profile);
1988 | } |
1989 | |
1990 | #else /* CONFIG_BLK_INLINE_ENCRYPTION */ |
1991 | |
1992 | static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) |
1993 | { |
1994 | } |
1995 | #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ |
1996 | |
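/*
 * Undo everything set up by alloc_dev(): workqueue, mempools, DAX device,
 * gendisk and queue, per-CPU counters, SRCU state and mutexes.
 */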
1997 | static void cleanup_mapped_device(struct mapped_device *md) |
1998 | { |
1999 | if (md->wq) |
		destroy_workqueue(md->wq);
	dm_free_md_mempools(md->mempools);

	if (md->dax_dev) {
		dax_remove_host(md->disk);
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
2007 | md->dax_dev = NULL; |
2008 | } |
2009 | |
2010 | dm_cleanup_zoned_dev(md); |
2011 | if (md->disk) { |
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
2015 | if (dm_get_md_type(md) != DM_TYPE_NONE) { |
2016 | struct table_device *td; |
2017 | |
2018 | dm_sysfs_exit(md); |
2019 | list_for_each_entry(td, &md->table_devices, list) { |
				bd_unlink_disk_holder(td->dm_dev.bdev,
						      md->disk);
2022 | } |
2023 | |
2024 | /* |
2025 | * Hold lock to make sure del_gendisk() won't concurrent |
2026 | * with open/close_table_device(). |
2027 | */ |
2028 | mutex_lock(&md->table_devices_lock); |
			del_gendisk(md->disk);
			mutex_unlock(&md->table_devices_lock);
		}
		dm_queue_destroy_crypto_profile(md->queue);
		put_disk(md->disk);
2034 | } |
2035 | |
2036 | if (md->pending_io) { |
		free_percpu(md->pending_io);
2038 | md->pending_io = NULL; |
2039 | } |
2040 | |
	cleanup_srcu_struct(&md->io_barrier);

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);
	mutex_destroy(&md->swap_bios_lock);
2047 | |
2048 | dm_mq_cleanup_mapped_device(md); |
2049 | } |
2050 | |
2051 | /* |
2052 | * Allocate and initialise a blank device with a given minor. |
2053 | */ |
2054 | static struct mapped_device *alloc_dev(int minor) |
2055 | { |
2056 | int r, numa_node_id = dm_get_numa_node(); |
2057 | struct mapped_device *md; |
2058 | void *old_md; |
2059 | |
	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMERR("unable to allocate device, out of memory.");
2063 | return NULL; |
2064 | } |
2065 | |
2066 | if (!try_module_get(THIS_MODULE)) |
2067 | goto bad_module_get; |
2068 | |
2069 | /* get a minor number for the dev */ |
2070 | if (minor == DM_ANY_MINOR) |
		r = next_free_minor(&minor);
2072 | else |
2073 | r = specific_minor(minor); |
2074 | if (r < 0) |
2075 | goto bad_minor; |
2076 | |
2077 | r = init_srcu_struct(&md->io_barrier); |
2078 | if (r < 0) |
2079 | goto bad_io_barrier; |
2080 | |
2081 | md->numa_node_id = numa_node_id; |
2082 | md->init_tio_pdu = false; |
2083 | md->type = DM_TYPE_NONE; |
2084 | mutex_init(&md->suspend_lock); |
2085 | mutex_init(&md->type_lock); |
2086 | mutex_init(&md->table_devices_lock); |
2087 | spin_lock_init(&md->deferred_lock); |
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
2094 | spin_lock_init(&md->uevent_lock); |
2095 | |
2096 | /* |
2097 | * default to bio-based until DM table is loaded and md->type |
2098 | * established. If request-based table is loaded: blk-mq will |
2099 | * override accordingly. |
2100 | */ |
2101 | md->disk = blk_alloc_disk(md->numa_node_id); |
2102 | if (!md->disk) |
2103 | goto bad; |
2104 | md->queue = md->disk->queue; |
2105 | |
2106 | init_waitqueue_head(&md->wait); |
2107 | INIT_WORK(&md->work, dm_wq_work); |
2108 | INIT_WORK(&md->requeue_work, dm_wq_requeue_work); |
2109 | init_waitqueue_head(&md->eventq); |
	init_completion(&md->kobj_holder.completion);
2111 | |
2112 | md->requeue_list = NULL; |
2113 | md->swap_bios = get_swap_bios(); |
	sema_init(&md->swap_bios_semaphore, md->swap_bios);
2115 | mutex_init(&md->swap_bios_lock); |
2116 | |
2117 | md->disk->major = _major; |
2118 | md->disk->first_minor = minor; |
2119 | md->disk->minors = 1; |
2120 | md->disk->flags |= GENHD_FL_NO_PART; |
2121 | md->disk->fops = &dm_blk_dops; |
2122 | md->disk->private_data = md; |
	sprintf(md->disk->disk_name, "dm-%d", minor);
2124 | |
2125 | if (IS_ENABLED(CONFIG_FS_DAX)) { |
		md->dax_dev = alloc_dax(md, &dm_dax_ops);
		if (IS_ERR(md->dax_dev)) {
2128 | md->dax_dev = NULL; |
2129 | goto bad; |
2130 | } |
2131 | set_dax_nocache(md->dax_dev); |
2132 | set_dax_nomc(md->dax_dev); |
		if (dax_add_host(md->dax_dev, md->disk))
2134 | goto bad; |
2135 | } |
2136 | |
2137 | format_dev_t(md->name, MKDEV(_major, minor)); |
2138 | |
	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
2140 | if (!md->wq) |
2141 | goto bad; |
2142 | |
2143 | md->pending_io = alloc_percpu(unsigned long); |
2144 | if (!md->pending_io) |
2145 | goto bad; |
2146 | |
	r = dm_stats_init(&md->stats);
2148 | if (r < 0) |
2149 | goto bad; |
2150 | |
2151 | /* Populate the mapping, nobody knows we exist yet */ |
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);
2155 | |
2156 | BUG_ON(old_md != MINOR_ALLOCED); |
2157 | |
2158 | return md; |
2159 | |
2160 | bad: |
2161 | cleanup_mapped_device(md); |
2162 | bad_io_barrier: |
2163 | free_minor(minor); |
2164 | bad_minor: |
2165 | module_put(THIS_MODULE); |
2166 | bad_module_get: |
	kvfree(md);
2168 | return NULL; |
2169 | } |
2170 | |
2171 | static void unlock_fs(struct mapped_device *md); |
2172 | |
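/*
 * Final teardown of a mapped_device once all holders have dropped their
 * references.
 */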
2173 | static void free_dev(struct mapped_device *md) |
2174 | { |
2175 | int minor = MINOR(disk_devt(md->disk)); |
2176 | |
2177 | unlock_fs(md); |
2178 | |
2179 | cleanup_mapped_device(md); |
2180 | |
2181 | WARN_ON_ONCE(!list_empty(&md->table_devices)); |
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
2187 | } |
2188 | |
2189 | /* |
2190 | * Bind a table to the device. |
2191 | */ |
2192 | static void event_callback(void *context) |
2193 | { |
2194 | unsigned long flags; |
2195 | LIST_HEAD(uevents); |
2196 | struct mapped_device *md = context; |
2197 | |
2198 | spin_lock_irqsave(&md->uevent_lock, flags); |
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
2205 | wake_up(&md->eventq); |
2206 | dm_issue_global_event(); |
2207 | } |
2208 | |
2209 | /* |
2210 | * Returns old map, which caller must destroy. |
2211 | */ |
2212 | static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, |
2213 | struct queue_limits *limits) |
2214 | { |
2215 | struct dm_table *old_map; |
2216 | sector_t size; |
2217 | int ret; |
2218 | |
2219 | lockdep_assert_held(&md->suspend_lock); |
2220 | |
2221 | size = dm_table_get_size(t); |
2222 | |
2223 | /* |
2224 | * Wipe any geometry if the size of the table changed. |
2225 | */ |
2226 | if (size != dm_get_size(md)) |
2227 | memset(&md->geometry, 0, sizeof(md->geometry)); |
2228 | |
	set_capacity(md->disk, size);

	dm_table_event_callback(t, event_callback, md);
2232 | |
2233 | if (dm_table_request_based(t)) { |
2234 | /* |
2235 | * Leverage the fact that request-based DM targets are |
2236 | * immutable singletons - used to optimize dm_mq_queue_rq. |
2237 | */ |
2238 | md->immutable_target = dm_table_get_immutable_target(t); |
2239 | |
2240 | /* |
2241 | * There is no need to reload with request-based dm because the |
2242 | * size of front_pad doesn't change. |
2243 | * |
2244 | * Note for future: If you are to reload bioset, prep-ed |
2245 | * requests in the queue may refer to bio from the old bioset, |
2246 | * so you must walk through the queue to unprep. |
2247 | */ |
2248 | if (!md->mempools) { |
2249 | md->mempools = t->mempools; |
2250 | t->mempools = NULL; |
2251 | } |
2252 | } else { |
2253 | /* |
2254 | * The md may already have mempools that need changing. |
2255 | * If so, reload bioset because front_pad may have changed |
2256 | * because a different table was loaded. |
2257 | */ |
		dm_free_md_mempools(md->mempools);
2259 | md->mempools = t->mempools; |
2260 | t->mempools = NULL; |
2261 | } |
2262 | |
	ret = dm_table_set_restrictions(t, md->queue, limits);
	if (ret) {
		old_map = ERR_PTR(ret);
2266 | goto out; |
2267 | } |
2268 | |
2269 | old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); |
2270 | rcu_assign_pointer(md->map, (void *)t); |
2271 | md->immutable_target_type = dm_table_get_immutable_target_type(t); |
2272 | |
2273 | if (old_map) |
2274 | dm_sync_table(md); |
2275 | out: |
2276 | return old_map; |
2277 | } |
2278 | |
2279 | /* |
2280 | * Returns unbound table for the caller to free. |
2281 | */ |
2282 | static struct dm_table *__unbind(struct mapped_device *md) |
2283 | { |
2284 | struct dm_table *map = rcu_dereference_protected(md->map, 1); |
2285 | |
2286 | if (!map) |
2287 | return NULL; |
2288 | |
	dm_table_event_callback(map, NULL, NULL);
2290 | RCU_INIT_POINTER(md->map, NULL); |
2291 | dm_sync_table(md); |
2292 | |
2293 | return map; |
2294 | } |
2295 | |
2296 | /* |
2297 | * Constructor for a new device. |
2298 | */ |
2299 | int dm_create(int minor, struct mapped_device **result) |
2300 | { |
2301 | struct mapped_device *md; |
2302 | |
2303 | md = alloc_dev(minor); |
2304 | if (!md) |
2305 | return -ENXIO; |
2306 | |
2307 | dm_ima_reset_data(md); |
2308 | |
2309 | *result = md; |
2310 | return 0; |
2311 | } |
2312 | |
2313 | /* |
2314 | * Functions to manage md->type. |
2315 | * All are required to hold md->type_lock. |
2316 | */ |
2317 | void dm_lock_md_type(struct mapped_device *md) |
2318 | { |
2319 | mutex_lock(&md->type_lock); |
2320 | } |
2321 | |
2322 | void dm_unlock_md_type(struct mapped_device *md) |
2323 | { |
	mutex_unlock(&md->type_lock);
2325 | } |
2326 | |
2327 | void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) |
2328 | { |
2329 | BUG_ON(!mutex_is_locked(&md->type_lock)); |
2330 | md->type = type; |
2331 | } |
2332 | |
2333 | enum dm_queue_mode dm_get_md_type(struct mapped_device *md) |
2334 | { |
2335 | return md->type; |
2336 | } |
2337 | |
2338 | struct target_type *dm_get_immutable_target_type(struct mapped_device *md) |
2339 | { |
2340 | return md->immutable_target_type; |
2341 | } |
2342 | |
2343 | /* |
2344 | * Setup the DM device's queue based on md's type |
2345 | */ |
2346 | int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) |
2347 | { |
2348 | enum dm_queue_mode type = dm_table_get_type(t); |
2349 | struct queue_limits limits; |
2350 | struct table_device *td; |
2351 | int r; |
2352 | |
2353 | switch (type) { |
2354 | case DM_TYPE_REQUEST_BASED: |
2355 | md->disk->fops = &dm_rq_blk_dops; |
2356 | r = dm_mq_init_request_queue(md, t); |
2357 | if (r) { |
			DMERR("Cannot initialize queue for request-based dm mapped device");
2359 | return r; |
2360 | } |
2361 | break; |
2362 | case DM_TYPE_BIO_BASED: |
2363 | case DM_TYPE_DAX_BIO_BASED: |
		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
2365 | break; |
2366 | case DM_TYPE_NONE: |
2367 | WARN_ON_ONCE(true); |
2368 | break; |
2369 | } |
2370 | |
	r = dm_calculate_queue_limits(t, &limits);
	if (r) {
		DMERR("Cannot calculate initial queue limits");
		return r;
	}
	r = dm_table_set_restrictions(t, md->queue, &limits);
2377 | if (r) |
2378 | return r; |
2379 | |
2380 | /* |
2381 | * Hold lock to make sure add_disk() and del_gendisk() won't concurrent |
2382 | * with open_table_device() and close_table_device(). |
2383 | */ |
2384 | mutex_lock(&md->table_devices_lock); |
	r = add_disk(md->disk);
	mutex_unlock(&md->table_devices_lock);
2387 | if (r) |
2388 | return r; |
2389 | |
2390 | /* |
2391 | * Register the holder relationship for devices added before the disk |
2392 | * was live. |
2393 | */ |
2394 | list_for_each_entry(td, &md->table_devices, list) { |
		r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
2396 | if (r) |
2397 | goto out_undo_holders; |
2398 | } |
2399 | |
2400 | r = dm_sysfs_init(md); |
2401 | if (r) |
2402 | goto out_undo_holders; |
2403 | |
2404 | md->type = type; |
2405 | return 0; |
2406 | |
2407 | out_undo_holders: |
2408 | list_for_each_entry_continue_reverse(td, &md->table_devices, list) |
		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
	mutex_lock(&md->table_devices_lock);
	del_gendisk(md->disk);
	mutex_unlock(&md->table_devices_lock);
2413 | return r; |
2414 | } |
2415 | |
2416 | struct mapped_device *dm_get_md(dev_t dev) |
2417 | { |
2418 | struct mapped_device *md; |
2419 | unsigned int minor = MINOR(dev); |
2420 | |
2421 | if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) |
2422 | return NULL; |
2423 | |
	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
2427 | if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || |
2428 | test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { |
2429 | md = NULL; |
2430 | goto out; |
2431 | } |
2432 | dm_get(md); |
2433 | out: |
	spin_unlock(&_minor_lock);
2435 | |
2436 | return md; |
2437 | } |
2438 | EXPORT_SYMBOL_GPL(dm_get_md); |
2439 | |
2440 | void *dm_get_mdptr(struct mapped_device *md) |
2441 | { |
2442 | return md->interface_ptr; |
2443 | } |
2444 | |
2445 | void dm_set_mdptr(struct mapped_device *md, void *ptr) |
2446 | { |
2447 | md->interface_ptr = ptr; |
2448 | } |
2449 | |
2450 | void dm_get(struct mapped_device *md) |
2451 | { |
	atomic_inc(&md->holders);
2453 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); |
2454 | } |
2455 | |
2456 | int dm_hold(struct mapped_device *md) |
2457 | { |
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
2465 | return 0; |
2466 | } |
2467 | EXPORT_SYMBOL_GPL(dm_hold); |
2468 | |
2469 | const char *dm_device_name(struct mapped_device *md) |
2470 | { |
2471 | return md->name; |
2472 | } |
2473 | EXPORT_SYMBOL_GPL(dm_device_name); |
2474 | |
2475 | static void __dm_destroy(struct mapped_device *md, bool wait) |
2476 | { |
2477 | struct dm_table *map; |
2478 | int srcu_idx; |
2479 | |
2480 | might_sleep(); |
2481 | |
	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_mark_disk_dead(md->disk);
2488 | |
2489 | /* |
2490 | * Take suspend_lock so that presuspend and postsuspend methods |
2491 | * do not race with internal suspend. |
2492 | */ |
2493 | mutex_lock(&md->suspend_lock); |
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		set_bit(DMF_SUSPENDED, &md->flags);
		set_bit(DMF_POST_SUSPENDING, &md->flags);
		dm_table_postsuspend_targets(map);
2500 | } |
2501 | /* dm_put_live_table must be before fsleep, otherwise deadlock is possible */ |
2502 | dm_put_live_table(md, srcu_idx); |
	mutex_unlock(&md->suspend_lock);
2504 | |
2505 | /* |
2506 | * Rare, but there may be I/O requests still going to complete, |
2507 | * for example. Wait for all references to disappear. |
2508 | * No one should increment the reference count of the mapped_device, |
2509 | * after the mapped_device state becomes DMF_FREEING. |
2510 | */ |
2511 | if (wait) |
		while (atomic_read(&md->holders))
			fsleep(1000);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_table_destroy(__unbind(md));
2519 | free_dev(md); |
2520 | } |
2521 | |
2522 | void dm_destroy(struct mapped_device *md) |
2523 | { |
	__dm_destroy(md, true);
2525 | } |
2526 | |
2527 | void dm_destroy_immediate(struct mapped_device *md) |
2528 | { |
	__dm_destroy(md, false);
2530 | } |
2531 | |
2532 | void dm_put(struct mapped_device *md) |
2533 | { |
	atomic_dec(&md->holders);
2535 | } |
2536 | EXPORT_SYMBOL_GPL(dm_put); |
2537 | |
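/*
 * Sum the per-CPU in-flight counters to check whether any bio-based I/O is
 * still outstanding against this device.
 */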
2538 | static bool dm_in_flight_bios(struct mapped_device *md) |
2539 | { |
2540 | int cpu; |
2541 | unsigned long sum = 0; |
2542 | |
2543 | for_each_possible_cpu(cpu) |
2544 | sum += *per_cpu_ptr(md->pending_io, cpu); |
2545 | |
2546 | return sum != 0; |
2547 | } |
2548 | |
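/*
 * Sleep on md->wait until all in-flight bios complete, or until a signal
 * arrives when @task_state is interruptible.
 */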
2549 | static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) |
2550 | { |
2551 | int r = 0; |
2552 | DEFINE_WAIT(wait); |
2553 | |
2554 | while (true) { |
		prepare_to_wait(&md->wait, &wait, task_state);
2556 | |
2557 | if (!dm_in_flight_bios(md)) |
2558 | break; |
2559 | |
		if (signal_pending_state(task_state, current)) {
2561 | r = -EINTR; |
2562 | break; |
2563 | } |
2564 | |
2565 | io_schedule(); |
2566 | } |
	finish_wait(&md->wait, &wait);
2568 | |
2569 | smp_rmb(); |
2570 | |
2571 | return r; |
2572 | } |
2573 | |
2574 | static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) |
2575 | { |
2576 | int r = 0; |
2577 | |
	if (!queue_is_mq(md->queue))
2579 | return dm_wait_for_bios_completion(md, task_state); |
2580 | |
2581 | while (true) { |
		if (!blk_mq_queue_inflight(md->queue))
2583 | break; |
2584 | |
		if (signal_pending_state(task_state, current)) {
2586 | r = -EINTR; |
2587 | break; |
2588 | } |
2589 | |
		fsleep(5000);
2591 | } |
2592 | |
2593 | return r; |
2594 | } |
2595 | |
2596 | /* |
2597 | * Process the deferred bios |
2598 | */ |
2599 | static void dm_wq_work(struct work_struct *work) |
2600 | { |
2601 | struct mapped_device *md = container_of(work, struct mapped_device, work); |
2602 | struct bio *bio; |
2603 | |
2604 | while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { |
		spin_lock_irq(&md->deferred_lock);
		bio = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);
2608 | |
2609 | if (!bio) |
2610 | break; |
2611 | |
2612 | submit_bio_noacct(bio); |
2613 | cond_resched(); |
2614 | } |
2615 | } |
2616 | |
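/*
 * Allow I/O submission again and kick the worker so that bios deferred
 * while the device was blocked for suspend get resubmitted.
 */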
2617 | static void dm_queue_flush(struct mapped_device *md) |
2618 | { |
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
2622 | } |
2623 | |
2624 | /* |
2625 | * Swap in a new table, returning the old one for the caller to destroy. |
2626 | */ |
2627 | struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) |
2628 | { |
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2630 | struct queue_limits limits; |
2631 | int r; |
2632 | |
2633 | mutex_lock(&md->suspend_lock); |
2634 | |
2635 | /* device must be suspended */ |
2636 | if (!dm_suspended_md(md)) |
2637 | goto out; |
2638 | |
2639 | /* |
2640 | * If the new table has no data devices, retain the existing limits. |
2641 | * This helps multipath with queue_if_no_path if all paths disappear, |
2642 | * then new I/O is queued based on these limits, and then some paths |
2643 | * reappear. |
2644 | */ |
2645 | if (dm_table_has_no_data_devices(table)) { |
2646 | live_map = dm_get_live_table_fast(md); |
2647 | if (live_map) |
2648 | limits = md->queue->limits; |
2649 | dm_put_live_table_fast(md); |
2650 | } |
2651 | |
2652 | if (!live_map) { |
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
2656 | goto out; |
2657 | } |
2658 | } |
2659 | |
	map = __bind(md, table, &limits);
2661 | dm_issue_global_event(); |
2662 | |
2663 | out: |
	mutex_unlock(&md->suspend_lock);
2665 | return map; |
2666 | } |
2667 | |
2668 | /* |
2669 | * Functions to lock and unlock any filesystem running on the |
2670 | * device. |
2671 | */ |
2672 | static int lock_fs(struct mapped_device *md) |
2673 | { |
2674 | int r; |
2675 | |
2676 | WARN_ON(test_bit(DMF_FROZEN, &md->flags)); |
2677 | |
	r = freeze_bdev(md->disk->part0);
	if (!r)
		set_bit(DMF_FROZEN, &md->flags);
2681 | return r; |
2682 | } |
2683 | |
2684 | static void unlock_fs(struct mapped_device *md) |
2685 | { |
2686 | if (!test_bit(DMF_FROZEN, &md->flags)) |
2687 | return; |
	thaw_bdev(md->disk->part0);
	clear_bit(DMF_FROZEN, &md->flags);
2690 | } |
2691 | |
2692 | /* |
2693 | * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG |
2694 | * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE |
2695 | * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY |
2696 | * |
2697 | * If __dm_suspend returns 0, the device is completely quiescent |
2698 | * now. There is no request-processing activity. All new requests |
2699 | * are being added to md->deferred list. |
2700 | */ |
2701 | static int __dm_suspend(struct mapped_device *md, struct dm_table *map, |
2702 | unsigned int suspend_flags, unsigned int task_state, |
2703 | int dmf_suspended_flag) |
2704 | { |
2705 | bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; |
2706 | bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; |
2707 | int r; |
2708 | |
2709 | lockdep_assert_held(&md->suspend_lock); |
2710 | |
2711 | /* |
2712 | * DMF_NOFLUSH_SUSPENDING must be set before presuspend. |
2713 | * This flag is cleared before dm_suspend returns. |
2714 | */ |
2715 | if (noflush) |
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		DMDEBUG("%s: suspending with flush", dm_device_name(md));
2719 | |
2720 | /* |
2721 | * This gets reverted if there's an error later and the targets |
2722 | * provide the .presuspend_undo hook. |
2723 | */ |
	dm_table_presuspend_targets(map);
2725 | |
2726 | /* |
2727 | * Flush I/O to the device. |
2728 | * Any I/O submitted after lock_fs() may not be flushed. |
2729 | * noflush takes precedence over do_lockfs. |
2730 | * (lock_fs() flushes I/Os and waits for them to complete.) |
2731 | */ |
2732 | if (!noflush && do_lockfs) { |
2733 | r = lock_fs(md); |
2734 | if (r) { |
			dm_table_presuspend_undo_targets(map);
2736 | return r; |
2737 | } |
2738 | } |
2739 | |
2740 | /* |
2741 | * Here we must make sure that no processes are submitting requests |
2742 | * to target drivers i.e. no one may be executing |
2743 | * dm_split_and_process_bio from dm_submit_bio. |
2744 | * |
2745 | * To get all processes out of dm_split_and_process_bio in dm_submit_bio, |
2746 | * we take the write lock. To prevent any process from reentering |
2747 | * dm_split_and_process_bio from dm_submit_bio and quiesce the thread |
2748 | * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call |
2749 | * flush_workqueue(md->wq). |
2750 | */ |
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);
2754 | |
2755 | /* |
2756 | * Stop md->queue before flushing md->wq in case request-based |
2757 | * dm defers requests to md->wq from md->queue. |
2758 | */ |
2759 | if (dm_request_based(md)) |
		dm_stop_queue(md->queue);
2761 | |
2762 | flush_workqueue(md->wq); |
2763 | |
2764 | /* |
2765 | * At this point no more requests are entering target request routines. |
2766 | * We call dm_wait_for_completion to wait for all existing requests |
2767 | * to finish. |
2768 | */ |
2769 | r = dm_wait_for_completion(md, task_state); |
2770 | if (!r) |
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);
2777 | |
2778 | /* were we interrupted ? */ |
2779 | if (r < 0) { |
2780 | dm_queue_flush(md); |
2781 | |
2782 | if (dm_request_based(md)) |
			dm_start_queue(md->queue);
2784 | |
2785 | unlock_fs(md); |
		dm_table_presuspend_undo_targets(map);
2787 | /* pushback list is already flushed, so skip flush */ |
2788 | } |
2789 | |
2790 | return r; |
2791 | } |
2792 | |
2793 | /* |
2794 | * We need to be able to change a mapping table under a mounted |
2795 | * filesystem. For example we might want to move some data in |
2796 | * the background. Before the table can be swapped with |
2797 | * dm_bind_table, dm_suspend must be called to flush any in |
2798 | * flight bios and ensure that any further io gets deferred. |
2799 | */ |
2800 | /* |
2801 | * Suspend mechanism in request-based dm. |
2802 | * |
2803 | * 1. Flush all I/Os by lock_fs() if needed. |
2804 | * 2. Stop dispatching any I/O by stopping the request_queue. |
2805 | * 3. Wait for all in-flight I/Os to be completed or requeued. |
2806 | * |
2807 | * To abort suspend, start the request_queue. |
2808 | */ |
2809 | int dm_suspend(struct mapped_device *md, unsigned int suspend_flags) |
2810 | { |
2811 | struct dm_table *map = NULL; |
2812 | int r = 0; |
2813 | |
2814 | retry: |
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2816 | |
2817 | if (dm_suspended_md(md)) { |
2818 | r = -EINVAL; |
2819 | goto out_unlock; |
2820 | } |
2821 | |
2822 | if (dm_suspended_internally_md(md)) { |
2823 | /* already internally suspended, wait for internal resume */ |
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2826 | if (r) |
2827 | return r; |
2828 | goto retry; |
2829 | } |
2830 | |
2831 | map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); |
2832 | if (!map) { |
2833 | /* avoid deadlock with fs/namespace.c:do_mount() */ |
2834 | suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; |
2835 | } |
2836 | |
2837 | r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); |
2838 | if (r) |
2839 | goto out_unlock; |
2840 | |
	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);
2844 | |
2845 | out_unlock: |
	mutex_unlock(&md->suspend_lock);
2847 | return r; |
2848 | } |
2849 | |
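/*
 * Reverse of __dm_suspend(): resume the targets, flush deferred bios,
 * restart the request queue for request-based dm and thaw the filesystem.
 */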
2850 | static int __dm_resume(struct mapped_device *md, struct dm_table *map) |
2851 | { |
2852 | if (map) { |
		int r = dm_table_resume_targets(map);
2854 | |
2855 | if (r) |
2856 | return r; |
2857 | } |
2858 | |
2859 | dm_queue_flush(md); |
2860 | |
2861 | /* |
2862 | * Flushing deferred I/Os must be done after targets are resumed |
2863 | * so that mapping of targets can work correctly. |
2864 | * Request-based dm is queueing the deferred I/Os in its request_queue. |
2865 | */ |
2866 | if (dm_request_based(md)) |
		dm_start_queue(md->queue);
2868 | |
2869 | unlock_fs(md); |
2870 | |
2871 | return 0; |
2872 | } |
2873 | |
2874 | int dm_resume(struct mapped_device *md) |
2875 | { |
2876 | int r; |
2877 | struct dm_table *map = NULL; |
2878 | |
2879 | retry: |
2880 | r = -EINVAL; |
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2882 | |
2883 | if (!dm_suspended_md(md)) |
2884 | goto out; |
2885 | |
2886 | if (dm_suspended_internally_md(md)) { |
2887 | /* already internally suspended, wait for internal resume */ |
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2890 | if (r) |
2891 | return r; |
2892 | goto retry; |
2893 | } |
2894 | |
2895 | map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); |
	if (!map || !dm_table_get_size(map))
2897 | goto out; |
2898 | |
2899 | r = __dm_resume(md, map); |
2900 | if (r) |
2901 | goto out; |
2902 | |
	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);
2906 | |
2907 | return r; |
2908 | } |
2909 | |
2910 | /* |
2911 | * Internal suspend/resume works like userspace-driven suspend. It waits |
2912 | * until all bios finish and prevents issuing new bios to the target drivers. |
2913 | * It may be used only from the kernel. |
2914 | */ |
2915 | |
2916 | static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags) |
2917 | { |
2918 | struct dm_table *map = NULL; |
2919 | |
2920 | lockdep_assert_held(&md->suspend_lock); |
2921 | |
2922 | if (md->internal_suspend_count++) |
2923 | return; /* nested internal suspend */ |
2924 | |
2925 | if (dm_suspended_md(md)) { |
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2927 | return; /* nest suspend */ |
2928 | } |
2929 | |
2930 | map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); |
2931 | |
2932 | /* |
2933 | * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is |
2934 | * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend |
2935 | * would require changing .presuspend to return an error -- avoid this |
2936 | * until there is a need for more elaborate variants of internal suspend. |
2937 | */ |
2938 | (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, |
2939 | DMF_SUSPENDED_INTERNALLY); |
2940 | |
	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);
2944 | } |
2945 | |
2946 | static void __dm_internal_resume(struct mapped_device *md) |
2947 | { |
2948 | BUG_ON(!md->internal_suspend_count); |
2949 | |
2950 | if (--md->internal_suspend_count) |
2951 | return; /* resume from nested internal suspend */ |
2952 | |
2953 | if (dm_suspended_md(md)) |
2954 | goto done; /* resume from nested suspend */ |
2955 | |
2956 | /* |
2957 | * NOTE: existing callers don't need to call dm_table_resume_targets |
2958 | * (which may fail -- so best to avoid it for now by passing NULL map) |
2959 | */ |
2960 | (void) __dm_resume(md, NULL); |
2961 | |
2962 | done: |
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2966 | } |
2967 | |
2968 | void dm_internal_suspend_noflush(struct mapped_device *md) |
2969 | { |
2970 | mutex_lock(&md->suspend_lock); |
2971 | __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); |
	mutex_unlock(&md->suspend_lock);
2973 | } |
2974 | EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); |
2975 | |
2976 | void dm_internal_resume(struct mapped_device *md) |
2977 | { |
2978 | mutex_lock(&md->suspend_lock); |
2979 | __dm_internal_resume(md); |
	mutex_unlock(&md->suspend_lock);
2981 | } |
2982 | EXPORT_SYMBOL_GPL(dm_internal_resume); |
2983 | |
2984 | /* |
2985 | * Fast variants of internal suspend/resume hold md->suspend_lock, |
2986 | * which prevents interaction with userspace-driven suspend. |
2987 | */ |
2988 | |
2989 | void dm_internal_suspend_fast(struct mapped_device *md) |
2990 | { |
2991 | mutex_lock(&md->suspend_lock); |
2992 | if (dm_suspended_md(md) || dm_suspended_internally_md(md)) |
2993 | return; |
2994 | |
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
2998 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); |
2999 | } |
3000 | EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); |
3001 | |
3002 | void dm_internal_resume_fast(struct mapped_device *md) |
3003 | { |
3004 | if (dm_suspended_md(md) || dm_suspended_internally_md(md)) |
3005 | goto done; |
3006 | |
3007 | dm_queue_flush(md); |
3008 | |
3009 | done: |
	mutex_unlock(&md->suspend_lock);
3011 | } |
3012 | EXPORT_SYMBOL_GPL(dm_internal_resume_fast); |
3013 | |
3014 | /* |
3015 | *--------------------------------------------------------------- |
3016 | * Event notification. |
3017 | *--------------------------------------------------------------- |
3018 | */ |
3019 | int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, |
3020 | unsigned int cookie, bool need_resize_uevent) |
3021 | { |
3022 | int r; |
3023 | unsigned int noio_flag; |
3024 | char udev_cookie[DM_COOKIE_LENGTH]; |
3025 | char *envp[3] = { NULL, NULL, NULL }; |
3026 | char **envpp = envp; |
3027 | if (cookie) { |
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		*envpp++ = udev_cookie;
	}
	if (need_resize_uevent) {
		*envpp++ = "RESIZE=1";
3034 | } |
3035 | |
3036 | noio_flag = memalloc_noio_save(); |
3037 | |
	r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);

	memalloc_noio_restore(noio_flag);
3041 | |
3042 | return r; |
3043 | } |
3044 | |
3045 | uint32_t dm_next_uevent_seq(struct mapped_device *md) |
3046 | { |
	return atomic_add_return(1, &md->uevent_seq);
3048 | } |
3049 | |
3050 | uint32_t dm_get_event_nr(struct mapped_device *md) |
3051 | { |
	return atomic_read(&md->event_nr);
3053 | } |
3054 | |
3055 | int dm_wait_event(struct mapped_device *md, int event_nr) |
3056 | { |
3057 | return wait_event_interruptible(md->eventq, |
3058 | (event_nr != atomic_read(&md->event_nr))); |
3059 | } |
3060 | |
3061 | void dm_uevent_add(struct mapped_device *md, struct list_head *elist) |
3062 | { |
3063 | unsigned long flags; |
3064 | |
3065 | spin_lock_irqsave(&md->uevent_lock, flags); |
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
3068 | } |
3069 | |
3070 | /* |
3071 | * The gendisk is only valid as long as you have a reference |
3072 | * count on 'md'. |
3073 | */ |
3074 | struct gendisk *dm_disk(struct mapped_device *md) |
3075 | { |
3076 | return md->disk; |
3077 | } |
3078 | EXPORT_SYMBOL_GPL(dm_disk); |
3079 | |
3080 | struct kobject *dm_kobject(struct mapped_device *md) |
3081 | { |
3082 | return &md->kobj_holder.kobj; |
3083 | } |
3084 | |
3085 | struct mapped_device *dm_get_from_kobject(struct kobject *kobj) |
3086 | { |
3087 | struct mapped_device *md; |
3088 | |
3089 | md = container_of(kobj, struct mapped_device, kobj_holder.kobj); |
3090 | |
	spin_lock(&_minor_lock);
3092 | if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { |
3093 | md = NULL; |
3094 | goto out; |
3095 | } |
3096 | dm_get(md); |
3097 | out: |
	spin_unlock(&_minor_lock);
3099 | |
3100 | return md; |
3101 | } |
3102 | |
3103 | int dm_suspended_md(struct mapped_device *md) |
3104 | { |
3105 | return test_bit(DMF_SUSPENDED, &md->flags); |
3106 | } |
3107 | |
3108 | static int dm_post_suspending_md(struct mapped_device *md) |
3109 | { |
3110 | return test_bit(DMF_POST_SUSPENDING, &md->flags); |
3111 | } |
3112 | |
3113 | int dm_suspended_internally_md(struct mapped_device *md) |
3114 | { |
3115 | return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); |
3116 | } |
3117 | |
3118 | int dm_test_deferred_remove_flag(struct mapped_device *md) |
3119 | { |
3120 | return test_bit(DMF_DEFERRED_REMOVE, &md->flags); |
3121 | } |
3122 | |
3123 | int dm_suspended(struct dm_target *ti) |
3124 | { |
	return dm_suspended_md(ti->table->md);
3126 | } |
3127 | EXPORT_SYMBOL_GPL(dm_suspended); |
3128 | |
3129 | int dm_post_suspending(struct dm_target *ti) |
3130 | { |
	return dm_post_suspending_md(ti->table->md);
3132 | } |
3133 | EXPORT_SYMBOL_GPL(dm_post_suspending); |
3134 | |
3135 | int dm_noflush_suspending(struct dm_target *ti) |
3136 | { |
	return __noflush_suspending(ti->table->md);
3138 | } |
3139 | EXPORT_SYMBOL_GPL(dm_noflush_suspending); |
3140 | |
3141 | void dm_free_md_mempools(struct dm_md_mempools *pools) |
3142 | { |
3143 | if (!pools) |
3144 | return; |
3145 | |
3146 | bioset_exit(&pools->bs); |
3147 | bioset_exit(&pools->io_bs); |
3148 | |
	kfree(pools);
3150 | } |
3151 | |
3152 | struct dm_pr { |
3153 | u64 old_key; |
3154 | u64 new_key; |
3155 | u32 flags; |
3156 | bool abort; |
3157 | bool fail_early; |
3158 | int ret; |
3159 | enum pr_type type; |
3160 | struct pr_keys *read_keys; |
3161 | struct pr_held_reservation *rsv; |
3162 | }; |
3163 | |
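/*
 * Call @fn on the device(s) of the live table's single target, passing @pr
 * as the callout data. PR operations are only supported on tables with
 * exactly one target that implements .iterate_devices.
 */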
3164 | static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, |
3165 | struct dm_pr *pr) |
3166 | { |
3167 | struct mapped_device *md = bdev->bd_disk->private_data; |
3168 | struct dm_table *table; |
3169 | struct dm_target *ti; |
3170 | int ret = -ENOTTY, srcu_idx; |
3171 | |
	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
3174 | goto out; |
3175 | |
3176 | /* We only support devices that have a single target */ |
3177 | if (table->num_targets != 1) |
3178 | goto out; |
	ti = dm_table_get_target(table, 0);
3180 | |
3181 | if (dm_suspended_md(md)) { |
3182 | ret = -EAGAIN; |
3183 | goto out; |
3184 | } |
3185 | |
3186 | ret = -EINVAL; |
3187 | if (!ti->type->iterate_devices) |
3188 | goto out; |
3189 | |
3190 | ti->type->iterate_devices(ti, fn, pr); |
3191 | ret = 0; |
3192 | out: |
3193 | dm_put_live_table(md, srcu_idx); |
3194 | return ret; |
3195 | } |
3196 | |
3197 | /* |
3198 | * For register / unregister we need to manually call out to every path. |
3199 | */ |
3200 | static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, |
3201 | sector_t start, sector_t len, void *data) |
3202 | { |
3203 | struct dm_pr *pr = data; |
3204 | const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; |
3205 | int ret; |
3206 | |
3207 | if (!ops || !ops->pr_register) { |
3208 | pr->ret = -EOPNOTSUPP; |
3209 | return -1; |
3210 | } |
3211 | |
3212 | ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); |
3213 | if (!ret) |
3214 | return 0; |
3215 | |
3216 | if (!pr->ret) |
3217 | pr->ret = ret; |
3218 | |
3219 | if (pr->fail_early) |
3220 | return -1; |
3221 | |
3222 | return 0; |
3223 | } |
3224 | |
3225 | static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, |
3226 | u32 flags) |
3227 | { |
3228 | struct dm_pr pr = { |
3229 | .old_key = old_key, |
3230 | .new_key = new_key, |
3231 | .flags = flags, |
3232 | .fail_early = true, |
3233 | .ret = 0, |
3234 | }; |
3235 | int ret; |
3236 | |
	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3238 | if (ret) { |
3239 | /* Didn't even get to register a path */ |
3240 | return ret; |
3241 | } |
3242 | |
3243 | if (!pr.ret) |
3244 | return 0; |
3245 | ret = pr.ret; |
3246 | |
3247 | if (!new_key) |
3248 | return ret; |
3249 | |
3250 | /* unregister all paths if we failed to register any path */ |
3251 | pr.old_key = new_key; |
3252 | pr.new_key = 0; |
3253 | pr.flags = 0; |
3254 | pr.fail_early = false; |
	(void) dm_call_pr(bdev, __dm_pr_register, &pr);
3256 | return ret; |
3257 | } |
3258 | |
3259 | |
3260 | static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev, |
3261 | sector_t start, sector_t len, void *data) |
3262 | { |
3263 | struct dm_pr *pr = data; |
3264 | const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; |
3265 | |
3266 | if (!ops || !ops->pr_reserve) { |
3267 | pr->ret = -EOPNOTSUPP; |
3268 | return -1; |
3269 | } |
3270 | |
3271 | pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags); |
3272 | if (!pr->ret) |
3273 | return -1; |
3274 | |
3275 | return 0; |
3276 | } |
3277 | |
3278 | static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, |
3279 | u32 flags) |
3280 | { |
3281 | struct dm_pr pr = { |
3282 | .old_key = key, |
3283 | .flags = flags, |
3284 | .type = type, |
3285 | .fail_early = false, |
3286 | .ret = 0, |
3287 | }; |
3288 | int ret; |
3289 | |
	ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
3291 | if (ret) |
3292 | return ret; |
3293 | |
3294 | return pr.ret; |
3295 | } |
3296 | |
3297 | /* |
3298 | * If there is a non-All Registrants type of reservation, the release must be |
3299 | * sent down the holding path. For the cases where there is no reservation or |
3300 | * the path is not the holder the device will also return success, so we must |
3301 | * try each path to make sure we got the correct path. |
3302 | */ |
3303 | static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev, |
3304 | sector_t start, sector_t len, void *data) |
3305 | { |
3306 | struct dm_pr *pr = data; |
3307 | const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; |
3308 | |
3309 | if (!ops || !ops->pr_release) { |
3310 | pr->ret = -EOPNOTSUPP; |
3311 | return -1; |
3312 | } |
3313 | |
3314 | pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type); |
3315 | if (pr->ret) |
3316 | return -1; |
3317 | |
3318 | return 0; |
3319 | } |
3320 | |
3321 | static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) |
3322 | { |
3323 | struct dm_pr pr = { |
3324 | .old_key = key, |
3325 | .type = type, |
3326 | .fail_early = false, |
3327 | }; |
3328 | int ret; |
3329 | |
	ret = dm_call_pr(bdev, __dm_pr_release, &pr);
3331 | if (ret) |
3332 | return ret; |
3333 | |
3334 | return pr.ret; |
3335 | } |
3336 | |
3337 | static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev, |
3338 | sector_t start, sector_t len, void *data) |
3339 | { |
3340 | struct dm_pr *pr = data; |
3341 | const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; |
3342 | |
3343 | if (!ops || !ops->pr_preempt) { |
3344 | pr->ret = -EOPNOTSUPP; |
3345 | return -1; |
3346 | } |
3347 | |
3348 | pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type, |
3349 | pr->abort); |
3350 | if (!pr->ret) |
3351 | return -1; |
3352 | |
3353 | return 0; |
3354 | } |
3355 | |
3356 | static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, |
3357 | enum pr_type type, bool abort) |
3358 | { |
3359 | struct dm_pr pr = { |
3360 | .new_key = new_key, |
3361 | .old_key = old_key, |
3362 | .type = type, |
3363 | .fail_early = false, |
3364 | }; |
3365 | int ret; |
3366 | |
	ret = dm_call_pr(bdev, __dm_pr_preempt, &pr);
3368 | if (ret) |
3369 | return ret; |
3370 | |
3371 | return pr.ret; |
3372 | } |
3373 | |
3374 | static int dm_pr_clear(struct block_device *bdev, u64 key) |
3375 | { |
3376 | struct mapped_device *md = bdev->bd_disk->private_data; |
3377 | const struct pr_ops *ops; |
3378 | int r, srcu_idx; |
3379 | |
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3381 | if (r < 0) |
3382 | goto out; |
3383 | |
3384 | ops = bdev->bd_disk->fops->pr_ops; |
3385 | if (ops && ops->pr_clear) |
3386 | r = ops->pr_clear(bdev, key); |
3387 | else |
3388 | r = -EOPNOTSUPP; |
3389 | out: |
3390 | dm_unprepare_ioctl(md, srcu_idx); |
3391 | return r; |
3392 | } |
3393 | |
3394 | static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev, |
3395 | sector_t start, sector_t len, void *data) |
3396 | { |
3397 | struct dm_pr *pr = data; |
3398 | const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; |
3399 | |
3400 | if (!ops || !ops->pr_read_keys) { |
3401 | pr->ret = -EOPNOTSUPP; |
3402 | return -1; |
3403 | } |
3404 | |
3405 | pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys); |
3406 | if (!pr->ret) |
3407 | return -1; |
3408 | |
3409 | return 0; |
3410 | } |
3411 | |
3412 | static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys) |
3413 | { |
3414 | struct dm_pr pr = { |
3415 | .read_keys = keys, |
3416 | }; |
3417 | int ret; |
3418 | |
	ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
3420 | if (ret) |
3421 | return ret; |
3422 | |
3423 | return pr.ret; |
3424 | } |
3425 | |
3426 | static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev, |
3427 | sector_t start, sector_t len, void *data) |
3428 | { |
3429 | struct dm_pr *pr = data; |
3430 | const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; |
3431 | |
3432 | if (!ops || !ops->pr_read_reservation) { |
3433 | pr->ret = -EOPNOTSUPP; |
3434 | return -1; |
3435 | } |
3436 | |
3437 | pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv); |
3438 | if (!pr->ret) |
3439 | return -1; |
3440 | |
3441 | return 0; |
3442 | } |
3443 | |
3444 | static int dm_pr_read_reservation(struct block_device *bdev, |
3445 | struct pr_held_reservation *rsv) |
3446 | { |
3447 | struct dm_pr pr = { |
3448 | .rsv = rsv, |
3449 | }; |
3450 | int ret; |
3451 | |
	ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
3453 | if (ret) |
3454 | return ret; |
3455 | |
3456 | return pr.ret; |
3457 | } |
3458 | |
3459 | static const struct pr_ops dm_pr_ops = { |
3460 | .pr_register = dm_pr_register, |
3461 | .pr_reserve = dm_pr_reserve, |
3462 | .pr_release = dm_pr_release, |
3463 | .pr_preempt = dm_pr_preempt, |
3464 | .pr_clear = dm_pr_clear, |
3465 | .pr_read_keys = dm_pr_read_keys, |
3466 | .pr_read_reservation = dm_pr_read_reservation, |
3467 | }; |
3468 | |
3469 | static const struct block_device_operations dm_blk_dops = { |
3470 | .submit_bio = dm_submit_bio, |
3471 | .poll_bio = dm_poll_bio, |
3472 | .open = dm_blk_open, |
3473 | .release = dm_blk_close, |
3474 | .ioctl = dm_blk_ioctl, |
3475 | .getgeo = dm_blk_getgeo, |
3476 | .report_zones = dm_blk_report_zones, |
3477 | .pr_ops = &dm_pr_ops, |
3478 | .owner = THIS_MODULE |
3479 | }; |
3480 | |
3481 | static const struct block_device_operations dm_rq_blk_dops = { |
3482 | .open = dm_blk_open, |
3483 | .release = dm_blk_close, |
3484 | .ioctl = dm_blk_ioctl, |
3485 | .getgeo = dm_blk_getgeo, |
3486 | .pr_ops = &dm_pr_ops, |
3487 | .owner = THIS_MODULE |
3488 | }; |
3489 | |
3490 | static const struct dax_operations dm_dax_ops = { |
3491 | .direct_access = dm_dax_direct_access, |
3492 | .zero_page_range = dm_dax_zero_page_range, |
3493 | .recovery_write = dm_dax_recovery_write, |
3494 | }; |
3495 | |
3496 | /* |
3497 | * module hooks |
3498 | */ |
3499 | module_init(dm_init); |
3500 | module_exit(dm_exit); |
3501 | |
3502 | module_param(major, uint, 0); |
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, 0644);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
3517 | |