// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
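
/*
 * Illustrative note (not from the original source): DM_TRACKED_CHUNK_HASH()
 * keeps only the low four bits of the chunk number, so with 16 buckets
 * chunks 5, 21 and 37 all land in bucket 5.
 */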

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned int hash_shift;
	struct hlist_bl_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	spinlock_t pe_allocation_lock;

	/* Protected by "pe_allocation_lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct rb_root out_of_order_tree;

	mempool_t pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	unsigned int in_progress;
	struct wait_queue_head in_progress_wait;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *	=> don't load the target; abort.
	 *	(We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
	bool merge_failed:1;

	bool discard_zeroes_cow:1;
	bool discard_passdown_origin:1;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE		0
#define SHUTDOWN_MERGE		1

/*
 * Maximum number of chunks being copied on write.
 *
 * The value was decided experimentally as a trade-off between memory
 * consumption, stalling the kernel's workqueues and maintaining a high enough
 * throughput.
 */
#define DEFAULT_COW_THRESHOLD 2048

static unsigned int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");
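
/*
 * A tuning sketch, assuming the module is loaded as dm_snapshot (the sysfs
 * paths are illustrative): both knobs can be adjusted at runtime, e.g.
 *
 *	echo 1024 > /sys/module/dm_snapshot/parameters/snapshot_cow_threshold
 *	echo 50 > /sys/module/dm_snapshot/parameters/snapshot_copy_throttle
 */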

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
	/* There was a copying error. */
	int copy_error;

	/* A sequence number, used for in-order completion. */
	sector_t exception_sequence;

	struct rb_node out_of_order_node;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so fsleep(1000) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		fsleep(1000);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned int split_boundary;
	struct list_head hash_list;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
				 GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory for _origins");
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	_dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
				    sizeof(struct list_head),
				    GFP_KERNEL);
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
	kfree(_dm_origins);
}

static unsigned int origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry(o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];

	list_add_tail(&o->hash_list, sl);
}

static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry(o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}

static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];

	list_add_tail(&o->hash_list, sl);
}

static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 * There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);

/* Lock to protect access to the completed and pending exception hash tables. */
struct dm_exception_table_lock {
	struct hlist_bl_head *complete_slot;
	struct hlist_bl_head *pending_slot;
};

static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
					 struct dm_exception_table_lock *lock)
{
	struct dm_exception_table *complete = &s->complete;
	struct dm_exception_table *pending = &s->pending;

	lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
	lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
}

static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
{
	hlist_bl_lock(lock->complete_slot);
	hlist_bl_lock(lock->pending_slot);
}

static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
{
	hlist_bl_unlock(lock->pending_slot);
	hlist_bl_unlock(lock->complete_slot);
}

static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned int hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
				   GFP_KERNEL);
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_HLIST_BL_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct hlist_bl_head *slot;
	struct dm_exception *ex;
	struct hlist_bl_node *pos, *n;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	kvfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
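
/*
 * Worked example (illustrative): with hash_shift == 3, chunks 0-7 all hash
 * to slot 0 and chunks 8-15 to slot 1, so runs of consecutive chunks tend
 * to share the bucket in which dm_insert_exception() can coalesce them.
 */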

static void dm_remove_exception(struct dm_exception *e)
{
	hlist_bl_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct hlist_bl_head *slot;
	struct hlist_bl_node *pos;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	hlist_bl_for_each_entry(e, pos, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, &s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct hlist_bl_head *l;
	struct hlist_bl_node *pos;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	hlist_bl_for_each_entry(e, pos, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk < e->old_chunk)
			break;
	}

out:
	if (!e) {
		/*
		 * Either the table doesn't support consecutive chunks or slot
		 * l is empty.
		 */
		hlist_bl_add_head(&new_e->hash_list, l);
	} else if (new_e->old_chunk < e->old_chunk) {
		/* Add before an existing exception */
		hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
	} else {
		/* Add to l's tail: e is the last exception in this slot */
		hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
	}
}
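
/*
 * Illustrative example of the coalescing above: if the table holds the
 * exception old 100 -> new 200 with a consecutive count of 1 (chunks
 * 100-101 mapping to 200-201), inserting old 102 -> new 202 just bumps the
 * count to 2, while inserting old 99 -> new 199 extends the run downwards
 * so it starts at 99 -> 199.
 */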

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_exception_table_lock lock;
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	/*
	 * Although there is no need to lock access to the exception tables
	 * here, if we don't then hlist_bl_add_head(), called by
	 * dm_insert_exception(), will complain about accessing the
	 * corresponding list without locking it first.
	 */
	dm_exception_table_lock_init(s, old, &lock);

	dm_exception_table_lock(&lock);
	dm_insert_exception(&s->complete, e);
	dm_exception_table_unlock(&lock);

	return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned int chunk_size = rounddown_pow_of_two(UINT_MAX);

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;

	mem /= sizeof(struct hlist_bl_head);

	return mem;
}
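
/*
 * Back-of-envelope (illustrative): on a 64-bit kernel a struct
 * hlist_bl_head is a single 8-byte pointer, so the fixed 2MB budget allows
 * at most 2097152 / 8 = 262144 buckets.
 */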

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
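
/*
 * Sizing sketch (illustrative numbers): a 1GiB COW device is 2097152
 * sectors; with 8KiB chunks (chunk_shift == 4) that gives 131072 chunks,
 * below the calc_max_buckets() cap and already a power of two, so the
 * completed table gets 131072 slots and the pending table
 * 131072 >> 3 = 16384 slots.
 */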

static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
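
/*
 * Illustrative example: for the exception old 100 -> new 200 with a
 * consecutive count of 3 (chunks 100-103), the merge removes chunks from
 * the end, so removing chunk 103 just decrements the count; once only
 * chunk 100 is left (count 0) the exception itself is freed.
 */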

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned int chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: shutting down merge");
			down_write(&s->lock);
			s->merge_failed = true;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (blkdev_issue_flush(s->origin->bdev) < 0) {
		DMERR("Flush after merge failed: shutting down merge");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = true;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
				   struct dm_target *ti)
{
	int r;
	unsigned int argc;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature arguments"},
	};

	/*
	 * No feature arguments supplied.
	 */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	while (argc && !r) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "discard_zeroes_cow"))
			s->discard_zeroes_cow = true;

		else if (!strcasecmp(arg_name, "discard_passdown_origin"))
			s->discard_passdown_origin = true;

		else {
			ti->error = "Unrecognised feature requested";
			r = -EINVAL;
			break;
		}
	}

	if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
		/*
		 * TODO: really these are disjoint.. but ti->num_discard_bios
		 * and dm_bio_get_target_bio_nr() require rigid constraints.
		 */
		ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
		r = -EINVAL;
	}

	return r;
}
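
/*
 * For example (illustrative), a table line whose feature section reads
 * "2 discard_zeroes_cow discard_passdown_origin" enables both features,
 * while "0" or omitting the feature arguments entirely enables neither.
 */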

/*
 * Construct a snapshot mapping:
 * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
 */
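
/*
 * A usage sketch (device paths are illustrative): create a persistent
 * snapshot of /dev/vg/base with 8-sector (4KiB) chunks:
 *
 *	dmsetup create snap --table \
 *		"0 <origin sectors> snapshot /dev/vg/base /dev/vg/cow p 8"
 */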
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	struct dm_arg_set as;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned int args_used, num_flush_bios = 1;
	blk_mode_t origin_mode = BLK_OPEN_READ;

	if (argc < 4) {
		ti->error = "requires 4 or more arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = BLK_OPEN_WRITE;
	}

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 4);
	r = parse_snapshot_features(&as, s, ti);
	if (r)
		goto bad_features;

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}
	if (s->cow->bdev && s->cow->bdev == s->origin->bdev) {
		ti->error = "COW device cannot be the same as origin device";
		r = -EINVAL;
		goto bad_store;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->snapshot_overflowed = 0;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	spin_lock_init(&s->pe_allocation_lock);
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
	s->out_of_order_tree = RB_ROOT;
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = false;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	init_waitqueue_head(&s->in_progress_wait);

	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
	if (r) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_bios = num_flush_bios;
	if (s->discard_zeroes_cow)
		ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
	ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		r = -EINVAL;
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);
bad_load_and_register:
	mempool_exit(&s->pending_pool);
bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);
bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
bad_hash_tables:
	dm_exception_store_destroy(s->store);
bad_store:
	dm_put_device(ti, s->cow);
bad_cow:
	dm_put_device(ti, s->origin);
bad_origin:
bad_features:
	kfree(s);
bad:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;
	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		fsleep(1000);
	/*
	 * Ensure instructions in mempool_exit aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_exit(&s->pending_pool);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	WARN_ON(s->in_progress);

	kfree(s);
}

static void account_start_copy(struct dm_snapshot *s)
{
	spin_lock(&s->in_progress_wait.lock);
	s->in_progress++;
	spin_unlock(&s->in_progress_wait.lock);
}

static void account_end_copy(struct dm_snapshot *s)
{
	spin_lock(&s->in_progress_wait.lock);
	BUG_ON(!s->in_progress);
	s->in_progress--;
	if (likely(s->in_progress <= cow_threshold) &&
	    unlikely(waitqueue_active(&s->in_progress_wait)))
		wake_up_locked(&s->in_progress_wait);
	spin_unlock(&s->in_progress_wait.lock);
}

static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
	if (unlikely(s->in_progress > cow_threshold)) {
		spin_lock(&s->in_progress_wait.lock);
		if (likely(s->in_progress > cow_threshold)) {
			/*
			 * NOTE: this throttle doesn't account for whether
			 * the caller is servicing an IO that will trigger a COW
			 * so excess throttling may result for chunks not required
			 * to be COW'd.  But if cow_threshold was reached, extra
			 * throttling is unlikely to negatively impact performance.
			 */
			DECLARE_WAITQUEUE(wait, current);

			__add_wait_queue(&s->in_progress_wait, &wait);
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&s->in_progress_wait.lock);
			if (unlock_origins)
				up_read(&_origins_lock);
			io_schedule();
			remove_wait_queue(&s->in_progress_wait, &wait);
			return false;
		}
		spin_unlock(&s->in_progress_wait.lock);
	}
	return true;
}
1565 | |
1566 | /* |
1567 | * Flush a list of buffers. |
1568 | */ |
1569 | static void flush_bios(struct bio *bio) |
1570 | { |
1571 | struct bio *n; |
1572 | |
1573 | while (bio) { |
1574 | n = bio->bi_next; |
1575 | bio->bi_next = NULL; |
1576 | submit_bio_noacct(bio); |
1577 | bio = n; |
1578 | } |
1579 | } |
1580 | |
1581 | static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit); |
1582 | |
1583 | /* |
1584 | * Flush a list of buffers. |
1585 | */ |
1586 | static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) |
1587 | { |
1588 | struct bio *n; |
1589 | int r; |
1590 | |
1591 | while (bio) { |
1592 | n = bio->bi_next; |
1593 | bio->bi_next = NULL; |
1594 | r = do_origin(origin: s->origin, bio, limit: false); |
1595 | if (r == DM_MAPIO_REMAPPED) |
1596 | submit_bio_noacct(bio); |
1597 | bio = n; |
1598 | } |
1599 | } |
1600 | |
1601 | /* |
1602 | * Error a list of buffers. |
1603 | */ |
1604 | static void error_bios(struct bio *bio) |
1605 | { |
1606 | struct bio *n; |
1607 | |
1608 | while (bio) { |
1609 | n = bio->bi_next; |
1610 | bio->bi_next = NULL; |
1611 | bio_io_error(bio); |
1612 | bio = n; |
1613 | } |
1614 | } |
1615 | |
1616 | static void __invalidate_snapshot(struct dm_snapshot *s, int err) |
1617 | { |
1618 | if (!s->valid) |
1619 | return; |
1620 | |
1621 | if (err == -EIO) |
1622 | DMERR("Invalidating snapshot: Error reading/writing." ); |
1623 | else if (err == -ENOMEM) |
1624 | DMERR("Invalidating snapshot: Unable to allocate exception." ); |
1625 | |
1626 | if (s->store->type->drop_snapshot) |
1627 | s->store->type->drop_snapshot(s->store); |
1628 | |
1629 | s->valid = 0; |
1630 | |
1631 | dm_table_event(t: s->ti->table); |
1632 | } |
1633 | |
1634 | static void invalidate_snapshot(struct dm_snapshot *s, int err) |
1635 | { |
1636 | down_write(sem: &s->lock); |
1637 | __invalidate_snapshot(s, err); |
1638 | up_write(sem: &s->lock); |
1639 | } |
1640 | |
1641 | static void pending_complete(void *context, int success) |
1642 | { |
1643 | struct dm_snap_pending_exception *pe = context; |
1644 | struct dm_exception *e; |
1645 | struct dm_snapshot *s = pe->snap; |
1646 | struct bio *origin_bios = NULL; |
1647 | struct bio *snapshot_bios = NULL; |
1648 | struct bio *full_bio = NULL; |
1649 | struct dm_exception_table_lock lock; |
1650 | int error = 0; |
1651 | |
1652 | dm_exception_table_lock_init(s, chunk: pe->e.old_chunk, lock: &lock); |
1653 | |
1654 | if (!success) { |
1655 | /* Read/write error - snapshot is unusable */ |
1656 | invalidate_snapshot(s, err: -EIO); |
1657 | error = 1; |
1658 | |
1659 | dm_exception_table_lock(lock: &lock); |
1660 | goto out; |
1661 | } |
1662 | |
1663 | e = alloc_completed_exception(GFP_NOIO); |
1664 | if (!e) { |
1665 | invalidate_snapshot(s, err: -ENOMEM); |
1666 | error = 1; |
1667 | |
1668 | dm_exception_table_lock(lock: &lock); |
1669 | goto out; |
1670 | } |
1671 | *e = pe->e; |
1672 | |
1673 | down_read(sem: &s->lock); |
1674 | dm_exception_table_lock(lock: &lock); |
1675 | if (!s->valid) { |
1676 | up_read(sem: &s->lock); |
1677 | free_completed_exception(e); |
1678 | error = 1; |
1679 | |
1680 | goto out; |
1681 | } |
1682 | |
1683 | /* |
1684 | * Add a proper exception. After inserting the completed exception all |
1685 | * subsequent snapshot reads to this chunk will be redirected to the |
1686 | * COW device. This ensures that we do not starve. Moreover, as long |
1687 | * as the pending exception exists, neither origin writes nor snapshot |
1688 | * merging can overwrite the chunk in origin. |
1689 | */ |
1690 | dm_insert_exception(eh: &s->complete, new_e: e); |
1691 | up_read(sem: &s->lock); |
1692 | |
1693 | /* Wait for conflicting reads to drain */ |
1694 | if (__chunk_is_tracked(s, chunk: pe->e.old_chunk)) { |
1695 | dm_exception_table_unlock(lock: &lock); |
1696 | __check_for_conflicting_io(s, chunk: pe->e.old_chunk); |
1697 | dm_exception_table_lock(lock: &lock); |
1698 | } |
1699 | |
1700 | out: |
1701 | /* Remove the in-flight exception from the list */ |
1702 | dm_remove_exception(e: &pe->e); |
1703 | |
1704 | dm_exception_table_unlock(lock: &lock); |
1705 | |
1706 | snapshot_bios = bio_list_get(bl: &pe->snapshot_bios); |
1707 | origin_bios = bio_list_get(bl: &pe->origin_bios); |
1708 | full_bio = pe->full_bio; |
1709 | if (full_bio) |
1710 | full_bio->bi_end_io = pe->full_bio_end_io; |
1711 | increment_pending_exceptions_done_count(); |
1712 | |
1713 | /* Submit any pending write bios */ |
1714 | if (error) { |
1715 | if (full_bio) |
1716 | bio_io_error(bio: full_bio); |
1717 | error_bios(bio: snapshot_bios); |
1718 | } else { |
1719 | if (full_bio) |
1720 | bio_endio(full_bio); |
1721 | flush_bios(bio: snapshot_bios); |
1722 | } |
1723 | |
1724 | retry_origin_bios(s, bio: origin_bios); |
1725 | |
1726 | free_pending_exception(pe); |
1727 | } |
1728 | |
1729 | static void complete_exception(struct dm_snap_pending_exception *pe) |
1730 | { |
1731 | struct dm_snapshot *s = pe->snap; |
1732 | |
1733 | /* Update the metadata if we are persistent */ |
1734 | s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error, |
1735 | pending_complete, pe); |
1736 | } |
1737 | |
1738 | /* |
1739 | * Called when the copy I/O has finished. kcopyd actually runs |
1740 | * this code so don't block. |
1741 | */ |
1742 | static void copy_callback(int read_err, unsigned long write_err, void *context) |
1743 | { |
1744 | struct dm_snap_pending_exception *pe = context; |
1745 | struct dm_snapshot *s = pe->snap; |
1746 | |
1747 | pe->copy_error = read_err || write_err; |
1748 | |
1749 | if (pe->exception_sequence == s->exception_complete_sequence) { |
1750 | struct rb_node *next; |
1751 | |
1752 | s->exception_complete_sequence++; |
1753 | complete_exception(pe); |
1754 | |
1755 | next = rb_first(&s->out_of_order_tree); |
1756 | while (next) { |
1757 | pe = rb_entry(next, struct dm_snap_pending_exception, |
1758 | out_of_order_node); |
1759 | if (pe->exception_sequence != s->exception_complete_sequence) |
1760 | break; |
1761 | next = rb_next(next); |
1762 | s->exception_complete_sequence++; |
1763 | rb_erase(&pe->out_of_order_node, &s->out_of_order_tree); |
1764 | complete_exception(pe); |
1765 | cond_resched(); |
1766 | } |
1767 | } else { |
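		/*
		 * This exception completed out of order: stash it in the
		 * out_of_order_tree, keyed by sequence number, until the
		 * earlier exceptions finish and the branch above drains it.
		 */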
1768 | struct rb_node *parent = NULL; |
1769 | struct rb_node **p = &s->out_of_order_tree.rb_node; |
1770 | struct dm_snap_pending_exception *pe2; |
1771 | |
1772 | while (*p) { |
1773 | pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node); |
1774 | parent = *p; |
1775 | |
1776 | BUG_ON(pe->exception_sequence == pe2->exception_sequence); |
1777 | if (pe->exception_sequence < pe2->exception_sequence) |
1778 | p = &((*p)->rb_left); |
1779 | else |
1780 | p = &((*p)->rb_right); |
1781 | } |
1782 | |
		rb_link_node(&pe->out_of_order_node, parent, p);
1784 | rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree); |
1785 | } |
1786 | account_end_copy(s); |
1787 | } |
1788 | |
1789 | /* |
1790 | * Dispatches the copy operation to kcopyd. |
1791 | */ |
1792 | static void start_copy(struct dm_snap_pending_exception *pe) |
1793 | { |
1794 | struct dm_snapshot *s = pe->snap; |
1795 | struct dm_io_region src, dest; |
1796 | struct block_device *bdev = s->origin->bdev; |
1797 | sector_t dev_size; |
1798 | |
1799 | dev_size = get_dev_size(bdev); |
1800 | |
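	/*
	 * Clamp the source region to the end of the origin device so that
	 * a partial final chunk never makes kcopyd read past the end of
	 * the block device.
	 */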
1801 | src.bdev = bdev; |
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	account_start_copy(s);
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
1812 | } |
1813 | |
1814 | static void full_bio_end_io(struct bio *bio) |
1815 | { |
1816 | void *callback_data = bio->bi_private; |
1817 | |
	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
1819 | } |
1820 | |
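/*
 * If an incoming write covers a chunk completely there is nothing worth
 * copying: the bio itself is redirected to the COW device and its
 * completion is fed into kcopyd's callback machinery via full_bio_end_io(),
 * so the sequence ordering in copy_callback() still applies.
 */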
1821 | static void start_full_bio(struct dm_snap_pending_exception *pe, |
1822 | struct bio *bio) |
1823 | { |
1824 | struct dm_snapshot *s = pe->snap; |
1825 | void *callback_data; |
1826 | |
1827 | pe->full_bio = bio; |
1828 | pe->full_bio_end_io = bio->bi_end_io; |
1829 | |
1830 | account_start_copy(s); |
	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);
1833 | |
1834 | bio->bi_end_io = full_bio_end_io; |
1835 | bio->bi_private = callback_data; |
1836 | |
1837 | submit_bio_noacct(bio); |
1838 | } |
1839 | |
1840 | static struct dm_snap_pending_exception * |
1841 | __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) |
1842 | { |
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1844 | |
1845 | if (!e) |
1846 | return NULL; |
1847 | |
1848 | return container_of(e, struct dm_snap_pending_exception, e); |
1849 | } |
1850 | |
1851 | /* |
1852 | * Inserts a pending exception into the pending table. |
1853 | * |
1854 | * NOTE: a write lock must be held on the chunk's pending exception table slot |
1855 | * before calling this. |
1856 | */ |
1857 | static struct dm_snap_pending_exception * |
1858 | __insert_pending_exception(struct dm_snapshot *s, |
1859 | struct dm_snap_pending_exception *pe, chunk_t chunk) |
1860 | { |
1861 | pe->e.old_chunk = chunk; |
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
1864 | pe->started = 0; |
1865 | pe->full_bio = NULL; |
1866 | |
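	/*
	 * pe_allocation_lock serialises allocation in the exception store
	 * with the assignment of exception_sequence, so sequence numbers
	 * are handed out in the order the exceptions were prepared.
	 */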
	spin_lock(&s->pe_allocation_lock);
	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		spin_unlock(&s->pe_allocation_lock);
1870 | free_pending_exception(pe); |
1871 | return NULL; |
1872 | } |
1873 | |
	pe->exception_sequence = s->exception_start_sequence++;
	spin_unlock(&s->pe_allocation_lock);

	dm_insert_exception(&s->pending, &pe->e);
1878 | |
1879 | return pe; |
1880 | } |
1881 | |
1882 | /* |
1883 | * Looks to see if this snapshot already has a pending exception |
1884 | * for this chunk, otherwise it allocates a new one and inserts |
1885 | * it into the pending table. |
1886 | * |
1887 | * NOTE: a write lock must be held on the chunk's pending exception table slot |
1888 | * before calling this. |
1889 | */ |
1890 | static struct dm_snap_pending_exception * |
1891 | __find_pending_exception(struct dm_snapshot *s, |
1892 | struct dm_snap_pending_exception *pe, chunk_t chunk) |
1893 | { |
1894 | struct dm_snap_pending_exception *pe2; |
1895 | |
1896 | pe2 = __lookup_pending_exception(s, chunk); |
1897 | if (pe2) { |
1898 | free_pending_exception(pe); |
1899 | return pe2; |
1900 | } |
1901 | |
1902 | return __insert_pending_exception(s, pe, chunk); |
1903 | } |
1904 | |
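/*
 * Redirect a bio into the COW device.  An exception can describe a run of
 * consecutive chunks, so the offset of the wanted chunk within the run
 * (chunk - e->old_chunk) is added to the first remapped chunk, and the
 * sector offset within the chunk is preserved via chunk_mask.
 */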
1905 | static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, |
1906 | struct bio *bio, chunk_t chunk) |
1907 | { |
	bio_set_dev(bio, s->cow->bdev);
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
1913 | } |
1914 | |
1915 | static void zero_callback(int read_err, unsigned long write_err, void *context) |
1916 | { |
1917 | struct bio *bio = context; |
1918 | struct dm_snapshot *s = bio->bi_private; |
1919 | |
1920 | account_end_copy(s); |
1921 | bio->bi_status = write_err ? BLK_STS_IOERR : 0; |
1922 | bio_endio(bio); |
1923 | } |
1924 | |
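/*
 * A discard to a chunk that already has an exception cannot simply be
 * completed: the existing COW copy must be zeroed so that snapshot reads
 * observe the discard.  The zeroing goes through kcopyd and the bio is
 * completed from zero_callback().
 */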
1925 | static void zero_exception(struct dm_snapshot *s, struct dm_exception *e, |
1926 | struct bio *bio, chunk_t chunk) |
1927 | { |
1928 | struct dm_io_region dest; |
1929 | |
1930 | dest.bdev = s->cow->bdev; |
1931 | dest.sector = bio->bi_iter.bi_sector; |
1932 | dest.count = s->store->chunk_size; |
1933 | |
1934 | account_start_copy(s); |
1935 | WARN_ON_ONCE(bio->bi_private); |
1936 | bio->bi_private = s; |
	dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
1938 | } |
1939 | |
1940 | static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio) |
1941 | { |
1942 | return bio->bi_iter.bi_size == |
1943 | (s->store->chunk_size << SECTOR_SHIFT); |
1944 | } |
1945 | |
1946 | static int snapshot_map(struct dm_target *ti, struct bio *bio) |
1947 | { |
1948 | struct dm_exception *e; |
1949 | struct dm_snapshot *s = ti->private; |
1950 | int r = DM_MAPIO_REMAPPED; |
1951 | chunk_t chunk; |
1952 | struct dm_snap_pending_exception *pe = NULL; |
1953 | struct dm_exception_table_lock lock; |
1954 | |
1955 | init_tracked_chunk(bio); |
1956 | |
1957 | if (bio->bi_opf & REQ_PREFLUSH) { |
		bio_set_dev(bio, s->cow->bdev);
1959 | return DM_MAPIO_REMAPPED; |
1960 | } |
1961 | |
	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
	dm_exception_table_lock_init(s, chunk, &lock);
1964 | |
1965 | /* Full snapshots are not usable */ |
1966 | /* To get here the table must be live so s->active is always set. */ |
1967 | if (!s->valid) |
1968 | return DM_MAPIO_KILL; |
1969 | |
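	/*
	 * Writes may need to allocate exceptions, so throttle them against
	 * the COW copies already in flight: wait_for_in_progress() sleeps
	 * until enough of them have drained.
	 */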
1970 | if (bio_data_dir(bio) == WRITE) { |
1971 | while (unlikely(!wait_for_in_progress(s, false))) |
1972 | ; /* wait_for_in_progress() has slept */ |
1973 | } |
1974 | |
	down_read(&s->lock);
	dm_exception_table_lock(&lock);
1977 | |
1978 | if (!s->valid || (unlikely(s->snapshot_overflowed) && |
1979 | bio_data_dir(bio) == WRITE)) { |
1980 | r = DM_MAPIO_KILL; |
1981 | goto out_unlock; |
1982 | } |
1983 | |
1984 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { |
1985 | if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) { |
1986 | /* |
1987 | * passdown discard to origin (without triggering |
1988 | * snapshot exceptions via do_origin; doing so would |
1989 | * defeat the goal of freeing space in origin that is |
1990 | * implied by the "discard_passdown_origin" feature) |
1991 | */ |
			bio_set_dev(bio, s->origin->bdev);
1993 | track_chunk(s, bio, chunk); |
1994 | goto out_unlock; |
1995 | } |
1996 | /* discard to snapshot (target_bio_nr == 0) zeroes exceptions */ |
1997 | } |
1998 | |
1999 | /* If the block is already remapped - use that, else remap it */ |
	e = dm_lookup_exception(&s->complete, chunk);
2001 | if (e) { |
2002 | remap_exception(s, e, bio, chunk); |
2003 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD) && |
2004 | io_overlaps_chunk(s, bio)) { |
			dm_exception_table_unlock(&lock);
			up_read(&s->lock);
2007 | zero_exception(s, e, bio, chunk); |
2008 | r = DM_MAPIO_SUBMITTED; /* discard is not issued */ |
2009 | goto out; |
2010 | } |
2011 | goto out_unlock; |
2012 | } |
2013 | |
2014 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { |
2015 | /* |
2016 | * If no exception exists, complete discard immediately |
2017 | * otherwise it'll trigger copy-out. |
2018 | */ |
2019 | bio_endio(bio); |
2020 | r = DM_MAPIO_SUBMITTED; |
2021 | goto out_unlock; |
2022 | } |
2023 | |
2024 | /* |
2025 | * Write to snapshot - higher level takes care of RW/RO |
2026 | * flags so we should only get this if we are |
2027 | * writable. |
2028 | */ |
2029 | if (bio_data_dir(bio) == WRITE) { |
2030 | pe = __lookup_pending_exception(s, chunk); |
2031 | if (!pe) { |
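			/*
			 * alloc_pending_exception() may sleep, so the chunk's
			 * table lock is dropped around the call; once retaken,
			 * the completed table must be re-checked in case the
			 * exception was finished by someone else meanwhile.
			 */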
			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(s);
			dm_exception_table_lock(&lock);

			e = dm_lookup_exception(&s->complete, chunk);
2037 | if (e) { |
2038 | free_pending_exception(pe); |
2039 | remap_exception(s, e, bio, chunk); |
2040 | goto out_unlock; |
2041 | } |
2042 | |
2043 | pe = __find_pending_exception(s, pe, chunk); |
2044 | if (!pe) { |
				dm_exception_table_unlock(&lock);
				up_read(&s->lock);

				down_write(&s->lock);

				if (s->store->userspace_supports_overflow) {
					if (s->valid && !s->snapshot_overflowed) {
						s->snapshot_overflowed = 1;
						DMERR("Snapshot overflowed: Unable to allocate exception.");
					}
				} else
					__invalidate_snapshot(s, -ENOMEM);
				up_write(&s->lock);
2058 | |
2059 | r = DM_MAPIO_KILL; |
2060 | goto out; |
2061 | } |
2062 | } |
2063 | |
		remap_exception(s, &pe->e, bio, chunk);
2065 | |
2066 | r = DM_MAPIO_SUBMITTED; |
2067 | |
2068 | if (!pe->started && io_overlaps_chunk(s, bio)) { |
2069 | pe->started = 1; |
2070 | |
			dm_exception_table_unlock(&lock);
			up_read(&s->lock);
2073 | |
2074 | start_full_bio(pe, bio); |
2075 | goto out; |
2076 | } |
2077 | |
		bio_list_add(&pe->snapshot_bios, bio);
2079 | |
2080 | if (!pe->started) { |
2081 | /* this is protected by the exception table lock */ |
2082 | pe->started = 1; |
2083 | |
			dm_exception_table_unlock(&lock);
			up_read(&s->lock);
2086 | |
2087 | start_copy(pe); |
2088 | goto out; |
2089 | } |
2090 | } else { |
		bio_set_dev(bio, s->origin->bdev);
2092 | track_chunk(s, bio, chunk); |
2093 | } |
2094 | |
2095 | out_unlock: |
	dm_exception_table_unlock(&lock);
	up_read(&s->lock);
2098 | out: |
2099 | return r; |
2100 | } |
2101 | |
2102 | /* |
2103 | * A snapshot-merge target behaves like a combination of a snapshot |
2104 | * target and a snapshot-origin target. It only generates new |
2105 | * exceptions in other snapshots and not in the one that is being |
2106 | * merged. |
2107 | * |
2108 | * For each chunk, if there is an existing exception, it is used to |
2109 | * redirect I/O to the cow device. Otherwise I/O is sent to the origin, |
2110 | * which in turn might generate exceptions in other snapshots. |
2111 | * If merging is currently taking place on the chunk in question, the |
2112 | * I/O is deferred by adding it to s->bios_queued_during_merge. |
2113 | */ |
2114 | static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) |
2115 | { |
2116 | struct dm_exception *e; |
2117 | struct dm_snapshot *s = ti->private; |
2118 | int r = DM_MAPIO_REMAPPED; |
2119 | chunk_t chunk; |
2120 | |
2121 | init_tracked_chunk(bio); |
2122 | |
2123 | if (bio->bi_opf & REQ_PREFLUSH) { |
2124 | if (!dm_bio_get_target_bio_nr(bio)) |
			bio_set_dev(bio, s->origin->bdev);
		else
			bio_set_dev(bio, s->cow->bdev);
2128 | return DM_MAPIO_REMAPPED; |
2129 | } |
2130 | |
2131 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { |
2132 | /* Once merging, discards no longer effect change */ |
2133 | bio_endio(bio); |
2134 | return DM_MAPIO_SUBMITTED; |
2135 | } |
2136 | |
	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);
2140 | |
2141 | /* Full merging snapshots are redirected to the origin */ |
2142 | if (!s->valid) |
2143 | goto redirect_to_origin; |
2144 | |
2145 | /* If the block is already remapped - use that */ |
	e = dm_lookup_exception(&s->complete, chunk);
2147 | if (e) { |
2148 | /* Queue writes overlapping with chunks being merged */ |
2149 | if (bio_data_dir(bio) == WRITE && |
2150 | chunk >= s->first_merging_chunk && |
2151 | chunk < (s->first_merging_chunk + |
2152 | s->num_merging_chunks)) { |
			bio_set_dev(bio, s->origin->bdev);
			bio_list_add(&s->bios_queued_during_merge, bio);
2155 | r = DM_MAPIO_SUBMITTED; |
2156 | goto out_unlock; |
2157 | } |
2158 | |
2159 | remap_exception(s, e, bio, chunk); |
2160 | |
2161 | if (bio_data_dir(bio) == WRITE) |
2162 | track_chunk(s, bio, chunk); |
2163 | goto out_unlock; |
2164 | } |
2165 | |
2166 | redirect_to_origin: |
	bio_set_dev(bio, s->origin->bdev);
2168 | |
2169 | if (bio_data_dir(bio) == WRITE) { |
		up_write(&s->lock);
		return do_origin(s->origin, bio, false);
2172 | } |
2173 | |
2174 | out_unlock: |
	up_write(&s->lock);
2176 | |
2177 | return r; |
2178 | } |
2179 | |
2180 | static int snapshot_end_io(struct dm_target *ti, struct bio *bio, |
2181 | blk_status_t *error) |
2182 | { |
2183 | struct dm_snapshot *s = ti->private; |
2184 | |
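	/*
	 * I/O that was remapped around a potential exception was registered
	 * in the tracked-chunk hash by track_chunk(); drop the entry here so
	 * that pending_complete() waiting in __check_for_conflicting_io()
	 * can make progress.
	 */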
2185 | if (is_bio_tracked(bio)) |
2186 | stop_tracking_chunk(s, bio); |
2187 | |
2188 | return DM_ENDIO_DONE; |
2189 | } |
2190 | |
2191 | static void snapshot_merge_presuspend(struct dm_target *ti) |
2192 | { |
2193 | struct dm_snapshot *s = ti->private; |
2194 | |
2195 | stop_merge(s); |
2196 | } |
2197 | |
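/*
 * Two snapshot targets can briefly share one COW device while exceptions
 * are handed over (e.g. when a snapshot is reloaded as snapshot-merge).
 * The handover itself happens in snapshot_resume(), so refuse to preresume
 * the destination until the source snapshot has been suspended.
 */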
2198 | static int snapshot_preresume(struct dm_target *ti) |
2199 | { |
2200 | int r = 0; |
2201 | struct dm_snapshot *s = ti->private; |
2202 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; |
2203 | |
	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);
2218 | |
2219 | return r; |
2220 | } |
2221 | |
2222 | static void snapshot_resume(struct dm_target *ti) |
2223 | { |
2224 | struct dm_snapshot *s = ti->private; |
2225 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL; |
2226 | struct dm_origin *o; |
2227 | struct mapped_device *origin_md = NULL; |
2228 | bool must_restart_merging = false; |
2229 | |
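	/*
	 * The origin (or the merging snapshot standing in for it) is
	 * suspended around the exception handover below so that no new
	 * exceptions are triggered while the tables change hands.
	 */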
	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);
2248 | |
2249 | if (origin_md) { |
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
2254 | } |
2255 | } |
2256 | |
	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}

	up_read(&_origins_lock);
2269 | |
2270 | if (origin_md) { |
2271 | if (must_restart_merging) |
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
2275 | } |
2276 | |
2277 | /* Now we have correct chunk size, reregister */ |
2278 | reregister_snapshot(s); |
2279 | |
	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
2283 | } |
2284 | |
2285 | static uint32_t get_origin_minimum_chunksize(struct block_device *bdev) |
2286 | { |
2287 | uint32_t min_chunksize; |
2288 | |
	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);
2292 | |
2293 | return min_chunksize; |
2294 | } |
2295 | |
2296 | static void snapshot_merge_resume(struct dm_target *ti) |
2297 | { |
2298 | struct dm_snapshot *s = ti->private; |
2299 | |
2300 | /* |
2301 | * Handover exceptions from existing snapshot. |
2302 | */ |
2303 | snapshot_resume(ti); |
2304 | |
2305 | /* |
2306 | * snapshot-merge acts as an origin, so set ti->max_io_len |
2307 | */ |
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
2309 | |
2310 | start_merge(s); |
2311 | } |
2312 | |
2313 | static void snapshot_status(struct dm_target *ti, status_type_t type, |
2314 | unsigned int status_flags, char *result, unsigned int maxlen) |
2315 | { |
2316 | unsigned int sz = 0; |
2317 | struct dm_snapshot *snap = ti->private; |
2318 | unsigned int num_features; |
2319 | |
2320 | switch (type) { |
2321 | case STATUSTYPE_INFO: |
2322 | |
		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else if (snap->snapshot_overflowed)
			DMEMIT("Overflow");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);
2348 | |
2349 | break; |
2350 | |
2351 | case STATUSTYPE_TABLE: |
2352 | /* |
2353 | * kdevname returns a static pointer so we need |
2354 | * to make private copies if the output is to |
2355 | * make sense. |
2356 | */ |
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		sz += snap->store->type->status(snap->store, type, result + sz,
						maxlen - sz);
		num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin;
		if (num_features) {
			DMEMIT(" %u", num_features);
			if (snap->discard_zeroes_cow)
				DMEMIT(" discard_zeroes_cow");
			if (snap->discard_passdown_origin)
				DMEMIT(" discard_passdown_origin");
2367 | } |
2368 | break; |
2369 | |
2370 | case STATUSTYPE_IMA: |
2371 | DMEMIT_TARGET_NAME_VERSION(ti->type); |
		DMEMIT(",snap_origin_name=%s", snap->origin->name);
		DMEMIT(",snap_cow_name=%s", snap->cow->name);
		DMEMIT(",snap_valid=%c", snap->valid ? 'y' : 'n');
		DMEMIT(",snap_merge_failed=%c", snap->merge_failed ? 'y' : 'n');
		DMEMIT(",snapshot_overflowed=%c", snap->snapshot_overflowed ? 'y' : 'n');
		DMEMIT(";");
2378 | break; |
2379 | } |
2380 | } |
2381 | |
2382 | static int snapshot_iterate_devices(struct dm_target *ti, |
2383 | iterate_devices_callout_fn fn, void *data) |
2384 | { |
2385 | struct dm_snapshot *snap = ti->private; |
2386 | int r; |
2387 | |
2388 | r = fn(ti, snap->origin, 0, ti->len, data); |
2389 | |
2390 | if (!r) |
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
2392 | |
2393 | return r; |
2394 | } |
2395 | |
2396 | static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits) |
2397 | { |
2398 | struct dm_snapshot *snap = ti->private; |
2399 | |
2400 | if (snap->discard_zeroes_cow) { |
2401 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; |
2402 | |
		down_read(&_origins_lock);

		(void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
2406 | if (snap_src && snap_dest) |
2407 | snap = snap_src; |
2408 | |
2409 | /* All discards are split on chunk_size boundary */ |
2410 | limits->discard_granularity = snap->store->chunk_size; |
2411 | limits->max_discard_sectors = snap->store->chunk_size; |
2412 | |
		up_read(&_origins_lock);
2414 | } |
2415 | } |
2416 | |
2417 | /* |
2418 | *--------------------------------------------------------------- |
2419 | * Origin methods |
2420 | *--------------------------------------------------------------- |
2421 | */ |
2422 | /* |
2423 | * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any |
2424 | * supplied bio was ignored. The caller may submit it immediately. |
2425 | * (No remapping actually occurs as the origin is always a direct linear |
2426 | * map.) |
2427 | * |
2428 | * If further exceptions are required, DM_MAPIO_SUBMITTED is returned |
2429 | * and any supplied bio is added to a list to be submitted once all |
2430 | * the necessary exceptions exist. |
2431 | */ |
2432 | static int __origin_write(struct list_head *snapshots, sector_t sector, |
2433 | struct bio *bio) |
2434 | { |
2435 | int r = DM_MAPIO_REMAPPED; |
2436 | struct dm_snapshot *snap; |
2437 | struct dm_exception *e; |
2438 | struct dm_snap_pending_exception *pe, *pe2; |
2439 | struct dm_snap_pending_exception *pe_to_start_now = NULL; |
2440 | struct dm_snap_pending_exception *pe_to_start_last = NULL; |
2441 | struct dm_exception_table_lock lock; |
2442 | chunk_t chunk; |
2443 | |
2444 | /* Do all the snapshots on this origin */ |
2445 | list_for_each_entry(snap, snapshots, list) { |
2446 | /* |
2447 | * Don't make new exceptions in a merging snapshot |
2448 | * because it has effectively been deleted |
2449 | */ |
2450 | if (dm_target_is_snapshot_merge(snap->ti)) |
2451 | continue; |
2452 | |
2453 | /* Nothing to do if writing beyond end of snapshot */ |
		if (sector >= dm_table_get_size(snap->ti->table))
2455 | continue; |
2456 | |
2457 | /* |
2458 | * Remember, different snapshots can have |
2459 | * different chunk sizes. |
2460 | */ |
		chunk = sector_to_chunk(snap->store, sector);
		dm_exception_table_lock_init(snap, chunk, &lock);

		down_read(&snap->lock);
		dm_exception_table_lock(&lock);
2466 | |
2467 | /* Only deal with valid and active snapshots */ |
2468 | if (!snap->valid || !snap->active) |
2469 | goto next_snapshot; |
2470 | |
		pe = __lookup_pending_exception(snap, chunk);
2472 | if (!pe) { |
2473 | /* |
2474 | * Check exception table to see if block is already |
2475 | * remapped in this snapshot and trigger an exception |
2476 | * if not. |
2477 | */ |
			e = dm_lookup_exception(&snap->complete, chunk);
2479 | if (e) |
2480 | goto next_snapshot; |
2481 | |
			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(snap);
			dm_exception_table_lock(&lock);

			pe2 = __lookup_pending_exception(snap, chunk);
2487 | |
2488 | if (!pe2) { |
				e = dm_lookup_exception(&snap->complete, chunk);
2490 | if (e) { |
2491 | free_pending_exception(pe); |
2492 | goto next_snapshot; |
2493 | } |
2494 | |
				pe = __insert_pending_exception(snap, pe, chunk);
				if (!pe) {
					dm_exception_table_unlock(&lock);
					up_read(&snap->lock);

					invalidate_snapshot(snap, -ENOMEM);
2501 | continue; |
2502 | } |
2503 | } else { |
2504 | free_pending_exception(pe); |
2505 | pe = pe2; |
2506 | } |
2507 | } |
2508 | |
2509 | r = DM_MAPIO_SUBMITTED; |
2510 | |
2511 | /* |
2512 | * If an origin bio was supplied, queue it to wait for the |
2513 | * completion of this exception, and start this one last, |
2514 | * at the end of the function. |
2515 | */ |
2516 | if (bio) { |
			bio_list_add(&pe->origin_bios, bio);
2518 | bio = NULL; |
2519 | |
2520 | if (!pe->started) { |
2521 | pe->started = 1; |
2522 | pe_to_start_last = pe; |
2523 | } |
2524 | } |
2525 | |
2526 | if (!pe->started) { |
2527 | pe->started = 1; |
2528 | pe_to_start_now = pe; |
2529 | } |
2530 | |
2531 | next_snapshot: |
		dm_exception_table_unlock(&lock);
		up_read(&snap->lock);
2534 | |
2535 | if (pe_to_start_now) { |
			start_copy(pe_to_start_now);
2537 | pe_to_start_now = NULL; |
2538 | } |
2539 | } |
2540 | |
2541 | /* |
2542 | * Submit the exception against which the bio is queued last, |
2543 | * to give the other exceptions a head start. |
2544 | */ |
2545 | if (pe_to_start_last) |
		start_copy(pe_to_start_last);
2547 | |
2548 | return r; |
2549 | } |
2550 | |
2551 | /* |
2552 | * Called on a write from the origin driver. |
2553 | */ |
2554 | static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit) |
2555 | { |
2556 | struct origin *o; |
2557 | int r = DM_MAPIO_REMAPPED; |
2558 | |
2559 | again: |
	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
2562 | if (o) { |
2563 | if (limit) { |
2564 | struct dm_snapshot *s; |
2565 | |
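			/*
			 * wait_for_in_progress() drops _origins_lock before
			 * sleeping, so if it slept we must retake the lock
			 * and redo the origin lookup from the start.
			 */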
2566 | list_for_each_entry(s, &o->snapshots, list) |
2567 | if (unlikely(!wait_for_in_progress(s, true))) |
2568 | goto again; |
2569 | } |
2570 | |
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
2572 | } |
	up_read(&_origins_lock);
2574 | |
2575 | return r; |
2576 | } |
2577 | |
2578 | /* |
2579 | * Trigger exceptions in all non-merging snapshots. |
2580 | * |
2581 | * The chunk size of the merging snapshot may be larger than the chunk |
2582 | * size of some other snapshot so we may need to reallocate multiple |
2583 | * chunks in other snapshots. |
2584 | * |
2585 | * We scan all the overlapping exceptions in the other snapshots. |
2586 | * Returns 1 if anything was reallocated and must be waited for, |
2587 | * otherwise returns 0. |
2588 | * |
2589 | * size must be a multiple of merging_snap's chunk_size. |
2590 | */ |
2591 | static int origin_write_extent(struct dm_snapshot *merging_snap, |
2592 | sector_t sector, unsigned int size) |
2593 | { |
2594 | int must_wait = 0; |
2595 | sector_t n; |
2596 | struct origin *o; |
2597 | |
2598 | /* |
2599 | * The origin's __minimum_chunk_size() got stored in max_io_len |
2600 | * by snapshot_merge_resume(). |
2601 | */ |
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);
2609 | |
2610 | return must_wait; |
2611 | } |
2612 | |
2613 | /* |
2614 | * Origin: maps a linear range of a device, with hooks for snapshotting. |
2615 | */ |
2616 | |
2617 | /* |
2618 | * Construct an origin mapping: <dev_path> |
2619 | * The context for an origin is merely a 'struct dm_dev *' |
2620 | * pointing to the real device. |
2621 | */ |
2622 | static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
2623 | { |
2624 | int r; |
2625 | struct dm_origin *o; |
2626 | |
2627 | if (argc != 1) { |
		ti->error = "origin: incorrect number of arguments";
2629 | return -EINVAL; |
2630 | } |
2631 | |
	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
2635 | r = -ENOMEM; |
2636 | goto bad_alloc; |
2637 | } |
2638 | |
	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
2642 | goto bad_open; |
2643 | } |
2644 | |
2645 | o->ti = ti; |
2646 | ti->private = o; |
2647 | ti->num_flush_bios = 1; |
2648 | |
2649 | return 0; |
2650 | |
2651 | bad_open: |
	kfree(o);
2653 | bad_alloc: |
2654 | return r; |
2655 | } |
2656 | |
2657 | static void origin_dtr(struct dm_target *ti) |
2658 | { |
2659 | struct dm_origin *o = ti->private; |
2660 | |
	dm_put_device(ti, o->dev);
	kfree(o);
2663 | } |
2664 | |
2665 | static int origin_map(struct dm_target *ti, struct bio *bio) |
2666 | { |
2667 | struct dm_origin *o = ti->private; |
2668 | unsigned int available_sectors; |
2669 | |
	bio_set_dev(bio, o->dev->bdev);
2671 | |
2672 | if (unlikely(bio->bi_opf & REQ_PREFLUSH)) |
2673 | return DM_MAPIO_REMAPPED; |
2674 | |
2675 | if (bio_data_dir(bio) != WRITE) |
2676 | return DM_MAPIO_REMAPPED; |
2677 | |
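	/*
	 * split_boundary is the smallest chunk size of all snapshots on this
	 * origin (always a power of two, hence the mask below); trim the bio
	 * so it never straddles a chunk boundary.
	 */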
2678 | available_sectors = o->split_boundary - |
2679 | ((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1)); |
2680 | |
2681 | if (bio_sectors(bio) > available_sectors) |
		dm_accept_partial_bio(bio, available_sectors);
2683 | |
2684 | /* Only tell snapshots if this is a write */ |
	return do_origin(o->dev, bio, true);
2686 | } |
2687 | |
2688 | /* |
2689 | * Set the target "max_io_len" field to the minimum of all the snapshots' |
2690 | * chunk sizes. |
2691 | */ |
2692 | static void origin_resume(struct dm_target *ti) |
2693 | { |
2694 | struct dm_origin *o = ti->private; |
2695 | |
	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
2701 | } |
2702 | |
2703 | static void origin_postsuspend(struct dm_target *ti) |
2704 | { |
2705 | struct dm_origin *o = ti->private; |
2706 | |
	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
2710 | } |
2711 | |
2712 | static void origin_status(struct dm_target *ti, status_type_t type, |
2713 | unsigned int status_flags, char *result, unsigned int maxlen) |
2714 | { |
2715 | struct dm_origin *o = ti->private; |
2716 | |
2717 | switch (type) { |
2718 | case STATUSTYPE_INFO: |
2719 | result[0] = '\0'; |
2720 | break; |
2721 | |
2722 | case STATUSTYPE_TABLE: |
		snprintf(result, maxlen, "%s", o->dev->name);
2724 | break; |
2725 | case STATUSTYPE_IMA: |
2726 | result[0] = '\0'; |
2727 | break; |
2728 | } |
2729 | } |
2730 | |
2731 | static int origin_iterate_devices(struct dm_target *ti, |
2732 | iterate_devices_callout_fn fn, void *data) |
2733 | { |
2734 | struct dm_origin *o = ti->private; |
2735 | |
2736 | return fn(ti, o->dev, 0, ti->len, data); |
2737 | } |
2738 | |
2739 | static struct target_type origin_target = { |
	.name    = "snapshot-origin",
2741 | .version = {1, 9, 0}, |
2742 | .module = THIS_MODULE, |
2743 | .ctr = origin_ctr, |
2744 | .dtr = origin_dtr, |
2745 | .map = origin_map, |
2746 | .resume = origin_resume, |
2747 | .postsuspend = origin_postsuspend, |
2748 | .status = origin_status, |
2749 | .iterate_devices = origin_iterate_devices, |
2750 | }; |
2751 | |
2752 | static struct target_type snapshot_target = { |
	.name    = "snapshot",
2754 | .version = {1, 16, 0}, |
2755 | .module = THIS_MODULE, |
2756 | .ctr = snapshot_ctr, |
2757 | .dtr = snapshot_dtr, |
2758 | .map = snapshot_map, |
2759 | .end_io = snapshot_end_io, |
2760 | .preresume = snapshot_preresume, |
2761 | .resume = snapshot_resume, |
2762 | .status = snapshot_status, |
2763 | .iterate_devices = snapshot_iterate_devices, |
2764 | .io_hints = snapshot_io_hints, |
2765 | }; |
2766 | |
2767 | static struct target_type merge_target = { |
2768 | .name = dm_snapshot_merge_target_name, |
2769 | .version = {1, 5, 0}, |
2770 | .module = THIS_MODULE, |
2771 | .ctr = snapshot_ctr, |
2772 | .dtr = snapshot_dtr, |
2773 | .map = snapshot_merge_map, |
2774 | .end_io = snapshot_end_io, |
2775 | .presuspend = snapshot_merge_presuspend, |
2776 | .preresume = snapshot_preresume, |
2777 | .resume = snapshot_merge_resume, |
2778 | .status = snapshot_status, |
2779 | .iterate_devices = snapshot_iterate_devices, |
2780 | .io_hints = snapshot_io_hints, |
2781 | }; |
2782 | |
2783 | static int __init dm_snapshot_init(void) |
2784 | { |
2785 | int r; |
2786 | |
2787 | r = dm_exception_store_init(); |
2788 | if (r) { |
		DMERR("Failed to initialize exception stores");
2790 | return r; |
2791 | } |
2792 | |
2793 | r = init_origin_hash(); |
2794 | if (r) { |
		DMERR("init_origin_hash failed.");
2796 | goto bad_origin_hash; |
2797 | } |
2798 | |
2799 | exception_cache = KMEM_CACHE(dm_exception, 0); |
2800 | if (!exception_cache) { |
		DMERR("Couldn't create exception cache.");
2802 | r = -ENOMEM; |
2803 | goto bad_exception_cache; |
2804 | } |
2805 | |
2806 | pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); |
2807 | if (!pending_cache) { |
		DMERR("Couldn't create pending cache.");
2809 | r = -ENOMEM; |
2810 | goto bad_pending_cache; |
2811 | } |
2812 | |
	r = dm_register_target(&snapshot_target);
	if (r < 0)
		goto bad_register_snapshot_target;

	r = dm_register_target(&origin_target);
	if (r < 0)
		goto bad_register_origin_target;

	r = dm_register_target(&merge_target);
	if (r < 0)
		goto bad_register_merge_target;

	return 0;

bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
2835 | bad_exception_cache: |
2836 | exit_origin_hash(); |
2837 | bad_origin_hash: |
2838 | dm_exception_store_exit(); |
2839 | |
2840 | return r; |
2841 | } |
2842 | |
2843 | static void __exit dm_snapshot_exit(void) |
2844 | { |
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
2852 | |
2853 | dm_exception_store_exit(); |
2854 | } |
2855 | |
2856 | /* Module hooks */ |
2857 | module_init(dm_snapshot_init); |
2858 | module_exit(dm_snapshot_exit); |
2859 | |
MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");
2865 | |