// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
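
/*
 * Both knobs above are kernel command-line parameters; booting with,
 * say, "mhash_entries=1048576" (value illustrative) overrides the
 * boot-time auto-sizing of the mount hash table, and mphash_entries=
 * does the same for the mountpoint hash table.
 */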

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */

struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};

/* /sys/fs */
struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * mnt_get_write_access - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mnt is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, mnt_put_write_access()
 * must be called. This is effectively a refcount.
 */
int mnt_get_write_access(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass the
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion, if the task
			 * setting MNT_WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * MNT_WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			lock_mount_hash();
			unlock_mount_hash();
			preempt_disable();
		}
	}
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure that if we see MNT_WRITE_HOLD cleared, we will also see
	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
	 * mnt_is_readonly() and bail in case we are racing with remount
	 * read-only.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(mnt_get_write_access);

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = mnt_get_write_access(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
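
/*
 * Example (illustrative only): the canonical caller pattern around a
 * filesystem-modifying operation:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	... perform the write ...
 *	mnt_drop_write(path->mnt);
 */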

/**
 * mnt_get_write_access_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_get_write_access, but if @file is already open for write it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with mnt_put_write_access_file.
 */
int mnt_get_write_access_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return mnt_get_write_access(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = mnt_get_write_access_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
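
/*
 * Example (illustrative only): the file-based variant, e.g. from an
 * ioctl handler operating on an already-open file:
 *
 *	err = mnt_want_write_file(file);
 *	if (err)
 *		return err;
 *	... perform the write ...
 *	mnt_drop_write_file(file);
 */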

/**
 * mnt_put_write_access - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it. Must be matched with
 * mnt_get_write_access() call above.
 */
void mnt_put_write_access(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows the filesystem to be frozen again. Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	mnt_put_write_access(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void mnt_put_write_access_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		mnt_put_write_access(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_put_write_access_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);

/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully, callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}

/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}

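/*
 * Example of the hold/unhold pairing above: flip a mount read-only,
 * failing with -EBUSY if there are currently active writers.
 */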
static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(mnt);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/**
 * __lookup_mnt - find first child mount
 * @mnt: parent mount
 * @dentry: mountpoint
 *
 * If @mnt has a child mount @c mounted at @dentry, find and return it.
 *
 * Note that the child mount @c need not be unique. There are cases
 * where shadow mounts are created. For example, during mount
 * propagation when a source mount @mnt whose root got overmounted by a
 * mount @o after path lookup but before @namespace_sem could be
 * acquired gets copied and propagated. So @mnt gets copied including
 * @o. When @mnt is propagated to a destination mount @d that already
 * has another mount @n mounted at the same mountpoint then the source
 * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
 * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
 * on @dentry.
 *
 * Return: The first child of @mnt mounted at @dentry or NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically. If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
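
/*
 * Example (illustrative only): stepping from a path onto whatever is
 * mounted on top of it:
 *
 *	struct vfsmount *mounted = lookup_mnt(path);
 *
 *	if (mounted) {
 *		... use mounted ...
 *		mntput(mounted);	// drop the reference lookup_mnt took
 *	}
 */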

static inline void lock_ns_list(struct mnt_namespace *ns)
{
	spin_lock(&ns->ns_lock);
}

static inline void unlock_ns_list(struct mnt_namespace *ns)
{
	spin_unlock(&ns->ns_lock);
}

static inline bool mnt_is_cursor(struct mount *mnt)
{
	return mnt->mnt.mnt_flags & MNT_CURSOR;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 * current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline. For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		if (mnt_is_cursor(mnt))
			continue;
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	unlock_ns_list(ns);
	up_read(&namespace_sem);

	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}

/*
 * vfsmount lock must be held. Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

/**
 * mnt_set_mountpoint_beneath - mount a mount beneath another one
 *
 * @new_parent: the source mount
 * @top_mnt: the mount beneath which @new_parent is mounted
 * @new_mp: the new mountpoint of @top_mnt on @new_parent
 *
 * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and
 * parent @top_mnt->mnt_parent and mount it on top of @new_parent at
 * @new_mp. And mount @new_parent on the old parent and old
 * mountpoint of @top_mnt.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void mnt_set_mountpoint_beneath(struct mount *new_parent,
				       struct mount *top_mnt,
				       struct mountpoint *new_mp)
{
	struct mount *old_top_parent = top_mnt->mnt_parent;
	struct mountpoint *old_top_mp = top_mnt->mnt_mp;

	mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent);
	mnt_change_mountpoint(new_parent, new_mp, top_mnt);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 * list of child mounts
 * @parent: the parent
 * @mnt: the new mount
 * @mp: the new mountpoint
 * @beneath: whether to mount @mnt beneath or on top of @parent
 *
 * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * If @beneath is true, remove @mnt from its current parent and
 * mountpoint and mount it on @mp on @parent, and mount @parent on the
 * old parent and old mountpoint of @mnt. Finally, attach @parent to
 * @mnt_hashtable and @parent->mnt_parent->mnt_mounts.
 *
 * Note, when __attach_mnt() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp, bool beneath)
{
	if (beneath)
		mnt_set_mountpoint_beneath(mnt, parent, mp);
	else
		mnt_set_mountpoint(parent, mp, mnt);
	/*
	 * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
	 * beneath @parent then @mnt will need to be attached to
	 * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
	 * isn't the same mount as @parent.
	 */
	__attach_mnt(mnt, mnt->mnt_parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp, false);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock. If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb = fc->root->d_sb;
	mnt->mnt.mnt_root = dget(fc->root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
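
/*
 * Example (illustrative only; "some_fs_type" is a placeholder): mounting
 * a filesystem from inside the kernel:
 *
 *	struct vfsmount *mnt = vfs_kern_mount(&some_fs_type, 0, "none", NULL);
 *
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *	... use mnt, then drop it with mntput(mnt) ...
 */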

struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us. However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL. So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong */
		if (unlikely(m->mnt_expiry_mark))
			WRITE_ONCE(m->mnt_expiry_mark, 0);
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
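
/*
 * Example (illustrative only): taking and dropping a temporary
 * reference on a mount:
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *
 *	... the mount stays pinned here ...
 *	mntput(m);
 */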
1364 | |
1365 | /* |
1366 | * Make a mount point inaccessible to new lookups. |
1367 | * Because there may still be current users, the caller MUST WAIT |
1368 | * for an RCU grace period before destroying the mount point. |
1369 | */ |
1370 | void mnt_make_shortterm(struct vfsmount *mnt) |
1371 | { |
1372 | if (mnt) |
1373 | real_mount(mnt)->mnt_ns = NULL; |
1374 | } |
1375 | |
1376 | /** |
1377 | * path_is_mountpoint() - Check if path is a mount in the current namespace. |
1378 | * @path: path to check |
1379 | * |
1380 | * d_mountpoint() can only be used reliably to establish if a dentry is |
1381 | * not mounted in any namespace and that common case is handled inline. |
1382 | * d_mountpoint() isn't aware of the possibility there may be multiple |
1383 | * mounts using a given dentry in a different namespace. This function |
1384 | * checks if the passed in path is a mountpoint rather than the dentry |
1385 | * alone. |
1386 | */ |
1387 | bool path_is_mountpoint(const struct path *path) |
1388 | { |
1389 | unsigned seq; |
1390 | bool res; |
1391 | |
1392 | if (!d_mountpoint(dentry: path->dentry)) |
1393 | return false; |
1394 | |
1395 | rcu_read_lock(); |
1396 | do { |
1397 | seq = read_seqbegin(sl: &mount_lock); |
1398 | res = __path_is_mountpoint(path); |
1399 | } while (read_seqretry(sl: &mount_lock, start: seq)); |
1400 | rcu_read_unlock(); |
1401 | |
1402 | return res; |
1403 | } |
1404 | EXPORT_SYMBOL(path_is_mountpoint); |
1405 | |
1406 | struct vfsmount *mnt_clone_internal(const struct path *path) |
1407 | { |
1408 | struct mount *p; |
1409 | p = clone_mnt(old: real_mount(mnt: path->mnt), root: path->dentry, CL_PRIVATE); |
1410 | if (IS_ERR(ptr: p)) |
1411 | return ERR_CAST(ptr: p); |
1412 | p->mnt.mnt_flags |= MNT_INTERNAL; |
1413 | return &p->mnt; |
1414 | } |
1415 | |
1416 | #ifdef CONFIG_PROC_FS |
1417 | static struct mount *mnt_list_next(struct mnt_namespace *ns, |
1418 | struct list_head *p) |
1419 | { |
1420 | struct mount *mnt, *ret = NULL; |
1421 | |
1422 | lock_ns_list(ns); |
1423 | list_for_each_continue(p, &ns->list) { |
1424 | mnt = list_entry(p, typeof(*mnt), mnt_list); |
1425 | if (!mnt_is_cursor(mnt)) { |
1426 | ret = mnt; |
1427 | break; |
1428 | } |
1429 | } |
1430 | unlock_ns_list(ns); |
1431 | |
1432 | return ret; |
1433 | } |
1434 | |
1435 | /* iterator; we want it to have access to namespace_sem, thus here... */ |
1436 | static void *m_start(struct seq_file *m, loff_t *pos) |
1437 | { |
1438 | struct proc_mounts *p = m->private; |
1439 | struct list_head *prev; |
1440 | |
1441 | down_read(sem: &namespace_sem); |
1442 | if (!*pos) { |
1443 | prev = &p->ns->list; |
1444 | } else { |
1445 | prev = &p->cursor.mnt_list; |
1446 | |
1447 | /* Read after we'd reached the end? */ |
1448 | if (list_empty(head: prev)) |
1449 | return NULL; |
1450 | } |
1451 | |
1452 | return mnt_list_next(ns: p->ns, p: prev); |
1453 | } |
1454 | |
1455 | static void *m_next(struct seq_file *m, void *v, loff_t *pos) |
1456 | { |
1457 | struct proc_mounts *p = m->private; |
1458 | struct mount *mnt = v; |
1459 | |
1460 | ++*pos; |
1461 | return mnt_list_next(ns: p->ns, p: &mnt->mnt_list); |
1462 | } |
1463 | |
1464 | static void m_stop(struct seq_file *m, void *v) |
1465 | { |
1466 | struct proc_mounts *p = m->private; |
1467 | struct mount *mnt = v; |
1468 | |
1469 | lock_ns_list(ns: p->ns); |
1470 | if (mnt) |
1471 | list_move_tail(list: &p->cursor.mnt_list, head: &mnt->mnt_list); |
1472 | else |
1473 | list_del_init(entry: &p->cursor.mnt_list); |
1474 | unlock_ns_list(ns: p->ns); |
1475 | up_read(sem: &namespace_sem); |
1476 | } |
1477 | |
1478 | static int m_show(struct seq_file *m, void *v) |
1479 | { |
1480 | struct proc_mounts *p = m->private; |
1481 | struct mount *r = v; |
1482 | return p->show(m, &r->mnt); |
1483 | } |
1484 | |
1485 | const struct seq_operations mounts_op = { |
1486 | .start = m_start, |
1487 | .next = m_next, |
1488 | .stop = m_stop, |
1489 | .show = m_show, |
1490 | }; |
1491 | |
1492 | void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor) |
1493 | { |
1494 | down_read(sem: &namespace_sem); |
1495 | lock_ns_list(ns); |
1496 | list_del(entry: &cursor->mnt_list); |
1497 | unlock_ns_list(ns); |
1498 | up_read(sem: &namespace_sem); |
1499 | } |
1500 | #endif /* CONFIG_PROC_FS */ |
1501 | |
1502 | /** |
1503 | * may_umount_tree - check if a mount tree is busy |
1504 | * @m: root of mount tree |
1505 | * |
1506 | * This is called to check if a tree of mounts has any |
1507 | * open files, pwds, chroots or sub mounts that are |
1508 | * busy. |
1509 | */ |
1510 | int may_umount_tree(struct vfsmount *m) |
1511 | { |
1512 | struct mount *mnt = real_mount(mnt: m); |
1513 | int actual_refs = 0; |
1514 | int minimum_refs = 0; |
1515 | struct mount *p; |
1516 | BUG_ON(!m); |
1517 | |
1518 | /* write lock needed for mnt_get_count */ |
1519 | lock_mount_hash(); |
1520 | for (p = mnt; p; p = next_mnt(p, root: mnt)) { |
1521 | actual_refs += mnt_get_count(mnt: p); |
1522 | minimum_refs += 2; |
1523 | } |
1524 | unlock_mount_hash(); |
1525 | |
1526 | if (actual_refs > minimum_refs) |
1527 | return 0; |
1528 | |
1529 | return 1; |
1530 | } |
1531 | |
1532 | EXPORT_SYMBOL(may_umount_tree); |
1533 | |
1534 | /** |
1535 | * may_umount - check if a mount point is busy |
1536 | * @mnt: root of mount |
1537 | * |
1538 | * This is called to check if a mount point has any |
1539 | * open files, pwds, chroots or sub mounts. If the |
1540 | * mount has sub mounts this will return busy |
1541 | * regardless of whether the sub mounts are busy. |
1542 | * |
1543 | * Doesn't take quota and stuff into account. IOW, in some cases it will |
1544 | * give false negatives. The main reason why it's here is that we need |
1545 | * a non-destructive way to look for easily umountable filesystems. |
1546 | */ |
1547 | int may_umount(struct vfsmount *mnt) |
1548 | { |
1549 | int ret = 1; |
1550 | down_read(sem: &namespace_sem); |
1551 | lock_mount_hash(); |
1552 | if (propagate_mount_busy(real_mount(mnt), 2)) |
1553 | ret = 0; |
1554 | unlock_mount_hash(); |
1555 | up_read(sem: &namespace_sem); |
1556 | return ret; |
1557 | } |
1558 | |
1559 | EXPORT_SYMBOL(may_umount); |
1560 | |
1561 | static void namespace_unlock(void) |
1562 | { |
1563 | struct hlist_head head; |
1564 | struct hlist_node *p; |
1565 | struct mount *m; |
1566 | LIST_HEAD(list); |
1567 | |
1568 | hlist_move_list(old: &unmounted, new: &head); |
1569 | list_splice_init(list: &ex_mountpoints, head: &list); |
1570 | |
1571 | up_write(sem: &namespace_sem); |
1572 | |
1573 | shrink_dentry_list(&list); |
1574 | |
1575 | if (likely(hlist_empty(&head))) |
1576 | return; |
1577 | |
1578 | synchronize_rcu_expedited(); |
1579 | |
1580 | hlist_for_each_entry_safe(m, p, &head, mnt_umount) { |
1581 | hlist_del(n: &m->mnt_umount); |
1582 | mntput(&m->mnt); |
1583 | } |
1584 | } |
1585 | |
1586 | static inline void namespace_lock(void) |
1587 | { |
1588 | down_write(sem: &namespace_sem); |
1589 | } |
1590 | |
1591 | enum umount_tree_flags { |
1592 | UMOUNT_SYNC = 1, |
1593 | UMOUNT_PROPAGATE = 2, |
1594 | UMOUNT_CONNECTED = 4, |
1595 | }; |
1596 | |
1597 | static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) |
1598 | { |
1599 | /* Leaving mounts connected is only valid for lazy umounts */ |
1600 | if (how & UMOUNT_SYNC) |
1601 | return true; |
1602 | |
1603 | /* A mount without a parent has nothing to be connected to */ |
1604 | if (!mnt_has_parent(mnt)) |
1605 | return true; |
1606 | |
1607 | /* Because the reference counting rules change when mounts are |
1608 | * unmounted and connected, umounted mounts may not be |
1609 | * connected to mounted mounts. |
1610 | */ |
1611 | if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) |
1612 | return true; |
1613 | |
1614 | /* Has it been requested that the mount remain connected? */ |
1615 | if (how & UMOUNT_CONNECTED) |
1616 | return false; |
1617 | |
1618 | /* Is the mount locked such that it needs to remain connected? */ |
1619 | if (IS_MNT_LOCKED(mnt)) |
1620 | return false; |
1621 | |
1622 | /* By default disconnect the mount */ |
1623 | return true; |
1624 | } |
1625 | |
1626 | /* |
1627 | * mount_lock must be held |
1628 | * namespace_sem must be held for write |
1629 | */ |
1630 | static void umount_tree(struct mount *mnt, enum umount_tree_flags how) |
1631 | { |
1632 | LIST_HEAD(tmp_list); |
1633 | struct mount *p; |
1634 | |
1635 | if (how & UMOUNT_PROPAGATE) |
1636 | propagate_mount_unlock(mnt); |
1637 | |
1638 | /* Gather the mounts to umount */ |
1639 | for (p = mnt; p; p = next_mnt(p, root: mnt)) { |
1640 | p->mnt.mnt_flags |= MNT_UMOUNT; |
1641 | list_move(list: &p->mnt_list, head: &tmp_list); |
1642 | } |
1643 | |
1644 | /* Hide the mounts from mnt_mounts */ |
1645 | list_for_each_entry(p, &tmp_list, mnt_list) { |
1646 | list_del_init(entry: &p->mnt_child); |
1647 | } |
1648 | |
1649 | /* Add propogated mounts to the tmp_list */ |
1650 | if (how & UMOUNT_PROPAGATE) |
1651 | propagate_umount(&tmp_list); |
1652 | |
1653 | while (!list_empty(head: &tmp_list)) { |
1654 | struct mnt_namespace *ns; |
1655 | bool disconnect; |
1656 | p = list_first_entry(&tmp_list, struct mount, mnt_list); |
1657 | list_del_init(entry: &p->mnt_expire); |
1658 | list_del_init(entry: &p->mnt_list); |
1659 | ns = p->mnt_ns; |
1660 | if (ns) { |
1661 | ns->mounts--; |
1662 | __touch_mnt_namespace(ns); |
1663 | } |
1664 | p->mnt_ns = NULL; |
1665 | if (how & UMOUNT_SYNC) |
1666 | p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; |
1667 | |
1668 | disconnect = disconnect_mount(mnt: p, how); |
1669 | if (mnt_has_parent(mnt: p)) { |
1670 | mnt_add_count(mnt: p->mnt_parent, n: -1); |
1671 | if (!disconnect) { |
1672 | /* Don't forget about p */ |
1673 | list_add_tail(new: &p->mnt_child, head: &p->mnt_parent->mnt_mounts); |
1674 | } else { |
1675 | umount_mnt(mnt: p); |
1676 | } |
1677 | } |
1678 | change_mnt_propagation(p, MS_PRIVATE); |
1679 | if (disconnect) |
1680 | hlist_add_head(n: &p->mnt_umount, h: &unmounted); |
1681 | } |
1682 | } |
1683 | |
1684 | static void shrink_submounts(struct mount *mnt); |
1685 | |
1686 | static int do_umount_root(struct super_block *sb) |
1687 | { |
1688 | int ret = 0; |
1689 | |
1690 | down_write(sem: &sb->s_umount); |
1691 | if (!sb_rdonly(sb)) { |
1692 | struct fs_context *fc; |
1693 | |
1694 | fc = fs_context_for_reconfigure(dentry: sb->s_root, SB_RDONLY, |
1695 | SB_RDONLY); |
1696 | if (IS_ERR(ptr: fc)) { |
1697 | ret = PTR_ERR(ptr: fc); |
1698 | } else { |
1699 | ret = parse_monolithic_mount_data(fc, NULL); |
1700 | if (!ret) |
1701 | ret = reconfigure_super(fc); |
1702 | put_fs_context(fc); |
1703 | } |
1704 | } |
1705 | up_write(sem: &sb->s_umount); |
1706 | return ret; |
1707 | } |
1708 | |
1709 | static int do_umount(struct mount *mnt, int flags) |
1710 | { |
1711 | struct super_block *sb = mnt->mnt.mnt_sb; |
1712 | int retval; |
1713 | |
1714 | retval = security_sb_umount(mnt: &mnt->mnt, flags); |
1715 | if (retval) |
1716 | return retval; |
1717 | |
1718 | /* |
1719 | * Allow userspace to request a mountpoint be expired rather than |
1720 | * unmounting unconditionally. Unmount only happens if: |
1721 | * (1) the mark is already set (the mark is cleared by mntput()) |
1722 | * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] |
1723 | */ |
1724 | if (flags & MNT_EXPIRE) { |
1725 | if (&mnt->mnt == current->fs->root.mnt || |
1726 | flags & (MNT_FORCE | MNT_DETACH)) |
1727 | return -EINVAL; |
1728 | |
1729 | /* |
1730 | * probably don't strictly need the lock here if we examined |
1731 | * all race cases, but it's a slowpath. |
1732 | */ |
1733 | lock_mount_hash(); |
1734 | if (mnt_get_count(mnt) != 2) { |
1735 | unlock_mount_hash(); |
1736 | return -EBUSY; |
1737 | } |
1738 | unlock_mount_hash(); |
1739 | |
1740 | if (!xchg(&mnt->mnt_expiry_mark, 1)) |
1741 | return -EAGAIN; |
1742 | } |
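/*
 * For illustration, the MNT_EXPIRE handshake above as seen from
 * userspace (a sketch; the helper and its retry policy are the
 * editor's, not kernel API): the first call only sets the mark and
 * fails with EAGAIN, a later call succeeds only if nothing used the
 * mount in between (use clears the mark via mntput()).
 *
 *	#include <errno.h>
 *	#include <sys/mount.h>
 *
 *	int try_expire(const char *target)
 *	{
 *		if (umount2(target, MNT_EXPIRE) == 0)
 *			return 0;	// was marked, now unmounted
 *		if (errno == EAGAIN)
 *			return 1;	// mark set; retry after a grace period
 *		return -1;		// EBUSY etc.
 *	}
 */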
1743 | |
1744 | /*
1745 | * If we may have to abort operations to get out of this
1746 | * mount, and they will themselves hold resources we must
1747 | * allow the fs to do things. In the Unix tradition of
1748 | * 'Gee, that's tricky, let's do it in userspace' the umount_begin
1749 | * might fail to complete on the first run through as other tasks
1750 | * must return, and the like. That's for the mount program to worry
1751 | * about for the moment.
1752 | */
1753 | |
1754 | if (flags & MNT_FORCE && sb->s_op->umount_begin) { |
1755 | sb->s_op->umount_begin(sb); |
1756 | } |
1757 | |
1758 | /* |
1759 | * No sense to grab the lock for this test, but the test itself looks
1760 | * somewhat bogus. Suggestions for better replacement? |
1761 | * Ho-hum... In principle, we might treat that as umount + switch |
1762 | * to rootfs. GC would eventually take care of the old vfsmount. |
1763 | * Actually it makes sense, especially if rootfs would contain a |
1764 | * /reboot - static binary that would close all descriptors and |
1765 | * call reboot(2). Then init(8) could umount root and exec /reboot.
1766 | */ |
1767 | if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { |
1768 | /* |
1769 | * Special case for "unmounting" root ... |
1770 | * we just try to remount it readonly. |
1771 | */ |
1772 | if (!ns_capable(ns: sb->s_user_ns, CAP_SYS_ADMIN)) |
1773 | return -EPERM; |
1774 | return do_umount_root(sb); |
1775 | } |
1776 | |
1777 | namespace_lock(); |
1778 | lock_mount_hash(); |
1779 | |
1780 | /* Recheck MNT_LOCKED with the locks held */ |
1781 | retval = -EINVAL; |
1782 | if (mnt->mnt.mnt_flags & MNT_LOCKED) |
1783 | goto out; |
1784 | |
1785 | event++; |
1786 | if (flags & MNT_DETACH) { |
1787 | if (!list_empty(head: &mnt->mnt_list)) |
1788 | umount_tree(mnt, how: UMOUNT_PROPAGATE); |
1789 | retval = 0; |
1790 | } else { |
1791 | shrink_submounts(mnt); |
1792 | retval = -EBUSY; |
1793 | if (!propagate_mount_busy(mnt, 2)) { |
1794 | if (!list_empty(head: &mnt->mnt_list)) |
1795 | umount_tree(mnt, how: UMOUNT_PROPAGATE|UMOUNT_SYNC); |
1796 | retval = 0; |
1797 | } |
1798 | } |
1799 | out: |
1800 | unlock_mount_hash(); |
1801 | namespace_unlock(); |
1802 | return retval; |
1803 | } |
1804 | |
1805 | /* |
1806 | * __detach_mounts - lazily unmount all mounts on the specified dentry |
1807 | * |
1808 | * During unlink, rmdir, and d_drop it is possible to lose the path
1809 | * to an existing mountpoint, and wind up leaking the mount. |
1810 | * detach_mounts allows lazily unmounting those mounts instead of |
1811 | * leaking them. |
1812 | * |
1813 | * The caller may hold dentry->d_inode->i_mutex. |
1814 | */ |
1815 | void __detach_mounts(struct dentry *dentry) |
1816 | { |
1817 | struct mountpoint *mp; |
1818 | struct mount *mnt; |
1819 | |
1820 | namespace_lock(); |
1821 | lock_mount_hash(); |
1822 | mp = lookup_mountpoint(dentry); |
1823 | if (!mp) |
1824 | goto out_unlock; |
1825 | |
1826 | event++; |
1827 | while (!hlist_empty(h: &mp->m_list)) { |
1828 | mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); |
1829 | if (mnt->mnt.mnt_flags & MNT_UMOUNT) { |
1830 | umount_mnt(mnt); |
1831 | hlist_add_head(n: &mnt->mnt_umount, h: &unmounted); |
1832 | } |
1833 | else umount_tree(mnt, how: UMOUNT_CONNECTED); |
1834 | } |
1835 | put_mountpoint(mp); |
1836 | out_unlock: |
1837 | unlock_mount_hash(); |
1838 | namespace_unlock(); |
1839 | } |
1840 | |
1841 | /* |
1842 | * Is the caller allowed to modify his namespace? |
1843 | */ |
1844 | bool may_mount(void) |
1845 | { |
1846 | return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); |
1847 | } |
1848 | |
1849 | /** |
1850 | * path_mounted - check whether path is mounted |
1851 | * @path: path to check |
1852 | * |
1853 | * Determine whether @path refers to the root of a mount. |
1854 | * |
1855 | * Return: true if @path is the root of a mount, false if not. |
1856 | */ |
1857 | static inline bool path_mounted(const struct path *path) |
1858 | { |
1859 | return path->mnt->mnt_root == path->dentry; |
1860 | } |
1861 | |
1862 | static void warn_mandlock(void) |
1863 | { |
1864 | pr_warn_once("=======================================================\n" |
1865 | "WARNING: The mand mount option has been deprecated and\n" |
1866 | " and is ignored by this kernel. Remove the mand\n" |
1867 | " option from the mount to silence this warning.\n" |
1868 | "=======================================================\n" ); |
1869 | } |
1870 | |
1871 | static int can_umount(const struct path *path, int flags) |
1872 | { |
1873 | struct mount *mnt = real_mount(mnt: path->mnt); |
1874 | |
1875 | if (!may_mount()) |
1876 | return -EPERM; |
1877 | if (!path_mounted(path)) |
1878 | return -EINVAL; |
1879 | if (!check_mnt(mnt)) |
1880 | return -EINVAL; |
1881 | if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ |
1882 | return -EINVAL; |
1883 | if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) |
1884 | return -EPERM; |
1885 | return 0; |
1886 | } |
1887 | |
1888 | // caller is responsible for flags being sane |
1889 | int path_umount(struct path *path, int flags) |
1890 | { |
1891 | struct mount *mnt = real_mount(mnt: path->mnt); |
1892 | int ret; |
1893 | |
1894 | ret = can_umount(path, flags); |
1895 | if (!ret) |
1896 | ret = do_umount(mnt, flags); |
1897 | |
1898 | /* we mustn't call path_put() as that would clear mnt_expiry_mark */ |
1899 | dput(path->dentry); |
1900 | mntput_no_expire(mnt); |
1901 | return ret; |
1902 | } |
1903 | |
1904 | static int ksys_umount(char __user *name, int flags) |
1905 | { |
1906 | int lookup_flags = LOOKUP_MOUNTPOINT; |
1907 | struct path path; |
1908 | int ret; |
1909 | |
1910 | // basic validity checks done first |
1911 | if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) |
1912 | return -EINVAL; |
1913 | |
1914 | if (!(flags & UMOUNT_NOFOLLOW)) |
1915 | lookup_flags |= LOOKUP_FOLLOW; |
1916 | ret = user_path_at(AT_FDCWD, name, flags: lookup_flags, path: &path); |
1917 | if (ret) |
1918 | return ret; |
1919 | return path_umount(path: &path, flags); |
1920 | } |
1921 | |
1922 | SYSCALL_DEFINE2(umount, char __user *, name, int, flags) |
1923 | { |
1924 | return ksys_umount(name, flags); |
1925 | } |
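/*
 * Userspace view of the flag set validated in ksys_umount() above
 * (illustrative; the path is hypothetical):
 *
 *	#include <sys/mount.h>
 *
 *	// Refuse to dereference a trailing symlink: UMOUNT_NOFOLLOW
 *	// keeps LOOKUP_FOLLOW out of the path lookup, so a symlink at
 *	// the target fails instead of unmounting what it points to.
 *	umount2("/mnt/untrusted", UMOUNT_NOFOLLOW);
 */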
1926 | |
1927 | #ifdef __ARCH_WANT_SYS_OLDUMOUNT |
1928 | |
1929 | /* |
1930 | * The 2.0 compatible umount. No flags. |
1931 | */ |
1932 | SYSCALL_DEFINE1(oldumount, char __user *, name) |
1933 | { |
1934 | return ksys_umount(name, flags: 0); |
1935 | } |
1936 | |
1937 | #endif |
1938 | |
1939 | static bool is_mnt_ns_file(struct dentry *dentry) |
1940 | { |
1941 | /* Is this a proxy for a mount namespace? */ |
1942 | return dentry->d_op == &ns_dentry_operations && |
1943 | dentry->d_fsdata == &mntns_operations; |
1944 | } |
1945 | |
1946 | static struct mnt_namespace *to_mnt_ns(struct ns_common *ns) |
1947 | { |
1948 | return container_of(ns, struct mnt_namespace, ns); |
1949 | } |
1950 | |
1951 | struct ns_common *from_mnt_ns(struct mnt_namespace *mnt) |
1952 | { |
1953 | return &mnt->ns; |
1954 | } |
1955 | |
1956 | static bool mnt_ns_loop(struct dentry *dentry) |
1957 | { |
1958 | /* Could bind mounting the mount namespace inode cause a
1959 | * mount namespace loop? Only strictly newer namespaces (those
1960 | * with a larger ->seq) may be bound, so no pin cycle can form. */
1961 | struct mnt_namespace *mnt_ns; |
1962 | if (!is_mnt_ns_file(dentry)) |
1963 | return false; |
1964 | |
1965 | mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode)); |
1966 | return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; |
1967 | } |
1968 | |
1969 | struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, |
1970 | int flag) |
1971 | { |
1972 | struct mount *res, *p, *q, *r, *parent; |
1973 | |
1974 | if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt)) |
1975 | return ERR_PTR(error: -EINVAL); |
1976 | |
1977 | if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) |
1978 | return ERR_PTR(error: -EINVAL); |
1979 | |
1980 | res = q = clone_mnt(old: mnt, root: dentry, flag); |
1981 | if (IS_ERR(ptr: q)) |
1982 | return q; |
1983 | |
1984 | q->mnt_mountpoint = mnt->mnt_mountpoint; |
1985 | |
1986 | p = mnt; |
1987 | list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { |
1988 | struct mount *s; |
1989 | if (!is_subdir(r->mnt_mountpoint, dentry)) |
1990 | continue; |
1991 | |
1992 | for (s = r; s; s = next_mnt(p: s, root: r)) { |
1993 | if (!(flag & CL_COPY_UNBINDABLE) && |
1994 | IS_MNT_UNBINDABLE(s)) { |
1995 | if (s->mnt.mnt_flags & MNT_LOCKED) { |
1996 | /* Both unbindable and locked. */ |
1997 | q = ERR_PTR(error: -EPERM); |
1998 | goto out; |
1999 | } else { |
2000 | s = skip_mnt_tree(p: s); |
2001 | continue; |
2002 | } |
2003 | } |
2004 | if (!(flag & CL_COPY_MNT_NS_FILE) && |
2005 | is_mnt_ns_file(dentry: s->mnt.mnt_root)) { |
2006 | s = skip_mnt_tree(p: s); |
2007 | continue; |
2008 | } |
2009 | while (p != s->mnt_parent) { |
2010 | p = p->mnt_parent; |
2011 | q = q->mnt_parent; |
2012 | } |
2013 | p = s; |
2014 | parent = q; |
2015 | q = clone_mnt(old: p, root: p->mnt.mnt_root, flag); |
2016 | if (IS_ERR(ptr: q)) |
2017 | goto out; |
2018 | lock_mount_hash(); |
2019 | list_add_tail(new: &q->mnt_list, head: &res->mnt_list); |
2020 | attach_mnt(mnt: q, parent, mp: p->mnt_mp, beneath: false); |
2021 | unlock_mount_hash(); |
2022 | } |
2023 | } |
2024 | return res; |
2025 | out: |
2026 | if (res) { |
2027 | lock_mount_hash(); |
2028 | umount_tree(mnt: res, how: UMOUNT_SYNC); |
2029 | unlock_mount_hash(); |
2030 | } |
2031 | return q; |
2032 | } |
2033 | |
2034 | /* Caller should check returned pointer for errors */ |
2035 | |
2036 | struct vfsmount *collect_mounts(const struct path *path) |
2037 | { |
2038 | struct mount *tree; |
2039 | namespace_lock(); |
2040 | if (!check_mnt(mnt: real_mount(mnt: path->mnt))) |
2041 | tree = ERR_PTR(error: -EINVAL); |
2042 | else |
2043 | tree = copy_tree(mnt: real_mount(mnt: path->mnt), dentry: path->dentry, |
2044 | CL_COPY_ALL | CL_PRIVATE); |
2045 | namespace_unlock(); |
2046 | if (IS_ERR(ptr: tree)) |
2047 | return ERR_CAST(ptr: tree); |
2048 | return &tree->mnt; |
2049 | } |
2050 | |
2051 | static void free_mnt_ns(struct mnt_namespace *); |
2052 | static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool); |
2053 | |
2054 | void dissolve_on_fput(struct vfsmount *mnt) |
2055 | { |
2056 | struct mnt_namespace *ns; |
2057 | namespace_lock(); |
2058 | lock_mount_hash(); |
2059 | ns = real_mount(mnt)->mnt_ns; |
2060 | if (ns) { |
2061 | if (is_anon_ns(ns)) |
2062 | umount_tree(mnt: real_mount(mnt), how: UMOUNT_CONNECTED); |
2063 | else |
2064 | ns = NULL; |
2065 | } |
2066 | unlock_mount_hash(); |
2067 | namespace_unlock(); |
2068 | if (ns) |
2069 | free_mnt_ns(ns); |
2070 | } |
2071 | |
2072 | void drop_collected_mounts(struct vfsmount *mnt) |
2073 | { |
2074 | namespace_lock(); |
2075 | lock_mount_hash(); |
2076 | umount_tree(mnt: real_mount(mnt), how: 0); |
2077 | unlock_mount_hash(); |
2078 | namespace_unlock(); |
2079 | } |
2080 | |
2081 | static bool has_locked_children(struct mount *mnt, struct dentry *dentry) |
2082 | { |
2083 | struct mount *child; |
2084 | |
2085 | list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { |
2086 | if (!is_subdir(child->mnt_mountpoint, dentry)) |
2087 | continue; |
2088 | |
2089 | if (child->mnt.mnt_flags & MNT_LOCKED) |
2090 | return true; |
2091 | } |
2092 | return false; |
2093 | } |
2094 | |
2095 | /** |
2096 | * clone_private_mount - create a private clone of a path |
2097 | * @path: path to clone |
2098 | * |
2099 | * This creates a new vfsmount, which will be the clone of @path. The new mount |
2100 | * will not be attached anywhere in the namespace and will be private (i.e. |
2101 | * changes to the originating mount won't be propagated into this). |
2102 | * |
2103 | * Release with mntput(). |
2104 | */ |
2105 | struct vfsmount *clone_private_mount(const struct path *path) |
2106 | { |
2107 | struct mount *old_mnt = real_mount(mnt: path->mnt); |
2108 | struct mount *new_mnt; |
2109 | |
2110 | down_read(sem: &namespace_sem); |
2111 | if (IS_MNT_UNBINDABLE(old_mnt)) |
2112 | goto invalid; |
2113 | |
2114 | if (!check_mnt(mnt: old_mnt)) |
2115 | goto invalid; |
2116 | |
2117 | if (has_locked_children(mnt: old_mnt, dentry: path->dentry)) |
2118 | goto invalid; |
2119 | |
2120 | new_mnt = clone_mnt(old: old_mnt, root: path->dentry, CL_PRIVATE); |
2121 | up_read(sem: &namespace_sem); |
2122 | |
2123 | if (IS_ERR(ptr: new_mnt)) |
2124 | return ERR_CAST(ptr: new_mnt); |
2125 | |
2126 | /* Longterm mount to be removed by kern_unmount*() */ |
2127 | new_mnt->mnt_ns = MNT_NS_INTERNAL; |
2128 | |
2129 | return &new_mnt->mnt; |
2130 | |
2131 | invalid: |
2132 | up_read(sem: &namespace_sem); |
2133 | return ERR_PTR(error: -EINVAL); |
2134 | } |
2135 | EXPORT_SYMBOL_GPL(clone_private_mount); |
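/*
 * Illustrative in-kernel usage, modelled loosely on what stacking
 * filesystems do with this helper (a sketch under assumptions; the
 * path and surrounding function are hypothetical):
 *
 *	struct path lower;
 *	struct vfsmount *priv;
 *	int err;
 *
 *	err = kern_path("/lower", LOOKUP_FOLLOW, &lower);
 *	if (err)
 *		return err;
 *	priv = clone_private_mount(&lower);
 *	path_put(&lower);
 *	if (IS_ERR(priv))
 *		return PTR_ERR(priv);
 *	// ... operate on priv->mnt_root via the usual vfs helpers ...
 *	kern_unmount(priv);	// releases the MNT_NS_INTERNAL longterm mount
 */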
2136 | |
2137 | int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, |
2138 | struct vfsmount *root) |
2139 | { |
2140 | struct mount *mnt; |
2141 | int res = f(root, arg); |
2142 | if (res) |
2143 | return res; |
2144 | list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { |
2145 | res = f(&mnt->mnt, arg); |
2146 | if (res) |
2147 | return res; |
2148 | } |
2149 | return 0; |
2150 | } |
2151 | |
2152 | static void lock_mnt_tree(struct mount *mnt) |
2153 | { |
2154 | struct mount *p; |
2155 | |
2156 | for (p = mnt; p; p = next_mnt(p, root: mnt)) { |
2157 | int flags = p->mnt.mnt_flags; |
2158 | /* Don't allow unprivileged users to change mount flags */ |
2159 | flags |= MNT_LOCK_ATIME; |
2160 | |
2161 | if (flags & MNT_READONLY) |
2162 | flags |= MNT_LOCK_READONLY; |
2163 | |
2164 | if (flags & MNT_NODEV) |
2165 | flags |= MNT_LOCK_NODEV; |
2166 | |
2167 | if (flags & MNT_NOSUID) |
2168 | flags |= MNT_LOCK_NOSUID; |
2169 | |
2170 | if (flags & MNT_NOEXEC) |
2171 | flags |= MNT_LOCK_NOEXEC; |
2172 | /* Don't allow unprivileged users to reveal what is under a mount */ |
2173 | if (list_empty(head: &p->mnt_expire)) |
2174 | flags |= MNT_LOCKED; |
2175 | p->mnt.mnt_flags = flags; |
2176 | } |
2177 | } |
2178 | |
2179 | static void cleanup_group_ids(struct mount *mnt, struct mount *end) |
2180 | { |
2181 | struct mount *p; |
2182 | |
2183 | for (p = mnt; p != end; p = next_mnt(p, root: mnt)) { |
2184 | if (p->mnt_group_id && !IS_MNT_SHARED(p)) |
2185 | mnt_release_group_id(mnt: p); |
2186 | } |
2187 | } |
2188 | |
2189 | static int invent_group_ids(struct mount *mnt, bool recurse) |
2190 | { |
2191 | struct mount *p; |
2192 | |
2193 | for (p = mnt; p; p = recurse ? next_mnt(p, root: mnt) : NULL) { |
2194 | if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { |
2195 | int err = mnt_alloc_group_id(mnt: p); |
2196 | if (err) { |
2197 | cleanup_group_ids(mnt, end: p); |
2198 | return err; |
2199 | } |
2200 | } |
2201 | } |
2202 | |
2203 | return 0; |
2204 | } |
2205 | |
2206 | int count_mounts(struct mnt_namespace *ns, struct mount *mnt) |
2207 | { |
2208 | unsigned int max = READ_ONCE(sysctl_mount_max); |
2209 | unsigned int mounts = 0; |
2210 | struct mount *p; |
2211 | |
2212 | if (ns->mounts >= max) |
2213 | return -ENOSPC; |
2214 | max -= ns->mounts; |
2215 | if (ns->pending_mounts >= max) |
2216 | return -ENOSPC; |
2217 | max -= ns->pending_mounts; |
2218 | |
2219 | for (p = mnt; p; p = next_mnt(p, root: mnt)) |
2220 | mounts++; |
2221 | |
2222 | if (mounts > max) |
2223 | return -ENOSPC; |
2224 | |
2225 | ns->pending_mounts += mounts; |
2226 | return 0; |
2227 | } |
2228 | |
2229 | enum mnt_tree_flags_t { |
2230 | MNT_TREE_MOVE = BIT(0), |
2231 | MNT_TREE_BENEATH = BIT(1), |
2232 | }; |
2233 | |
2234 | /** |
2235 | * attach_recursive_mnt - attach a source mount tree |
2236 | * @source_mnt: mount tree to be attached |
2237 | * @top_mnt: mount that @source_mnt will be mounted on or mounted beneath |
2238 | * @dest_mp: the mountpoint @source_mnt will be mounted at |
2239 | * @flags: modify how @source_mnt is supposed to be attached |
2240 | * |
2241 | * NOTE: the table below explains the semantics when a source mount
2242 | * of a given type is attached to a destination mount of a given type. |
2243 | * --------------------------------------------------------------------------- |
2244 | * | BIND MOUNT OPERATION | |
2245 | * |************************************************************************** |
2246 | * | source-->| shared | private | slave | unbindable | |
2247 | * | dest | | | | | |
2248 | * | | | | | | | |
2249 | * | v | | | | | |
2250 | * |************************************************************************** |
2251 | * | shared | shared (++) | shared (+) | shared(+++)| invalid | |
2252 | * | | | | | | |
2253 | * |non-shared| shared (+) | private | slave (*) | invalid | |
2254 | * *************************************************************************** |
2255 | * A bind operation clones the source mount and mounts the clone on the |
2256 | * destination mount. |
2257 | * |
2258 | * (++) the cloned mount is propagated to all the mounts in the propagation |
2259 | * tree of the destination mount and the cloned mount is added to |
2260 | * the peer group of the source mount. |
2261 | * (+) the cloned mount is created under the destination mount and is marked |
2262 | * as shared. The cloned mount is added to the peer group of the source |
2263 | * mount. |
2264 | * (+++) the mount is propagated to all the mounts in the propagation tree |
2265 | * of the destination mount and the cloned mount is made slave |
2266 | * of the same master as that of the source mount. The cloned mount |
2267 | * is marked as 'shared and slave'. |
2268 | * (*) the cloned mount is made a slave of the same master as that of the |
2269 | * source mount. |
2270 | * |
2271 | * --------------------------------------------------------------------------- |
2272 | * | MOVE MOUNT OPERATION | |
2273 | * |************************************************************************** |
2274 | * | source-->| shared | private | slave | unbindable | |
2275 | * | dest | | | | | |
2276 | * | | | | | | | |
2277 | * | v | | | | | |
2278 | * |************************************************************************** |
2279 | * | shared | shared (+) | shared (+) | shared(+++) | invalid | |
2280 | * | | | | | | |
2281 | * |non-shared| shared (+*) | private | slave (*) | unbindable | |
2282 | * *************************************************************************** |
2283 | * |
2284 | * (+) the mount is moved to the destination. And is then propagated to |
2285 | * all the mounts in the propagation tree of the destination mount. |
2286 | * (+*) the mount is moved to the destination. |
2287 | * (+++) the mount is moved to the destination and is then propagated to |
2288 | * all the mounts belonging to the destination mount's propagation tree. |
2289 | * the mount is marked as 'shared and slave'. |
2290 | * (*) the mount continues to be a slave at the new location. |
2291 | * |
2292 | * If the source mount is a tree, the operations explained above are
2293 | * applied to each mount in the tree.
2294 | * Must be called without spinlocks held, since this function can sleep |
2295 | * in allocations. |
2296 | * |
2297 | * Context: The function expects namespace_lock() to be held. |
2298 | * Return: If @source_mnt was successfully attached 0 is returned. |
2299 | * Otherwise a negative error code is returned. |
2300 | */ |
2301 | static int attach_recursive_mnt(struct mount *source_mnt, |
2302 | struct mount *top_mnt, |
2303 | struct mountpoint *dest_mp, |
2304 | enum mnt_tree_flags_t flags) |
2305 | { |
2306 | struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; |
2307 | HLIST_HEAD(tree_list); |
2308 | struct mnt_namespace *ns = top_mnt->mnt_ns; |
2309 | struct mountpoint *smp; |
2310 | struct mount *child, *dest_mnt, *p; |
2311 | struct hlist_node *n; |
2312 | int err = 0; |
2313 | bool moving = flags & MNT_TREE_MOVE, beneath = flags & MNT_TREE_BENEATH; |
2314 | |
2315 | /* |
2316 | * Preallocate a mountpoint in case the new mounts need to be |
2317 | * mounted beneath mounts on the same mountpoint. |
2318 | */ |
2319 | smp = get_mountpoint(dentry: source_mnt->mnt.mnt_root); |
2320 | if (IS_ERR(ptr: smp)) |
2321 | return PTR_ERR(ptr: smp); |
2322 | |
2323 | /* Is there space to add these mounts to the mount namespace? */ |
2324 | if (!moving) { |
2325 | err = count_mounts(ns, mnt: source_mnt); |
2326 | if (err) |
2327 | goto out; |
2328 | } |
2329 | |
2330 | if (beneath) |
2331 | dest_mnt = top_mnt->mnt_parent; |
2332 | else |
2333 | dest_mnt = top_mnt; |
2334 | |
2335 | if (IS_MNT_SHARED(dest_mnt)) { |
2336 | err = invent_group_ids(mnt: source_mnt, recurse: true); |
2337 | if (err) |
2338 | goto out; |
2339 | err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); |
2340 | } |
2341 | lock_mount_hash(); |
2342 | if (err) |
2343 | goto out_cleanup_ids; |
2344 | |
2345 | if (IS_MNT_SHARED(dest_mnt)) { |
2346 | for (p = source_mnt; p; p = next_mnt(p, root: source_mnt)) |
2347 | set_mnt_shared(p); |
2348 | } |
2349 | |
2350 | if (moving) { |
2351 | if (beneath) |
2352 | dest_mp = smp; |
2353 | unhash_mnt(mnt: source_mnt); |
2354 | attach_mnt(mnt: source_mnt, parent: top_mnt, mp: dest_mp, beneath); |
2355 | touch_mnt_namespace(ns: source_mnt->mnt_ns); |
2356 | } else { |
2357 | if (source_mnt->mnt_ns) { |
2358 | /* move from anon - the caller will destroy */ |
2359 | list_del_init(entry: &source_mnt->mnt_ns->list); |
2360 | } |
2361 | if (beneath) |
2362 | mnt_set_mountpoint_beneath(new_parent: source_mnt, top_mnt, new_mp: smp); |
2363 | else |
2364 | mnt_set_mountpoint(mnt: dest_mnt, mp: dest_mp, child_mnt: source_mnt); |
2365 | commit_tree(mnt: source_mnt); |
2366 | } |
2367 | |
2368 | hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { |
2369 | struct mount *q; |
2370 | hlist_del_init(n: &child->mnt_hash); |
2371 | q = __lookup_mnt(mnt: &child->mnt_parent->mnt, |
2372 | dentry: child->mnt_mountpoint); |
2373 | if (q) |
2374 | mnt_change_mountpoint(parent: child, mp: smp, mnt: q); |
2375 | /* Notice when we are propagating across user namespaces */ |
2376 | if (child->mnt_parent->mnt_ns->user_ns != user_ns) |
2377 | lock_mnt_tree(mnt: child); |
2378 | child->mnt.mnt_flags &= ~MNT_LOCKED; |
2379 | commit_tree(mnt: child); |
2380 | } |
2381 | put_mountpoint(mp: smp); |
2382 | unlock_mount_hash(); |
2383 | |
2384 | return 0; |
2385 | |
2386 | out_cleanup_ids: |
2387 | while (!hlist_empty(h: &tree_list)) { |
2388 | child = hlist_entry(tree_list.first, struct mount, mnt_hash); |
2389 | child->mnt_parent->mnt_ns->pending_mounts = 0; |
2390 | umount_tree(mnt: child, how: UMOUNT_SYNC); |
2391 | } |
2392 | unlock_mount_hash(); |
2393 | cleanup_group_ids(mnt: source_mnt, NULL); |
2394 | out: |
2395 | ns->pending_mounts = 0; |
2396 | |
2397 | read_seqlock_excl(sl: &mount_lock); |
2398 | put_mountpoint(mp: smp); |
2399 | read_sequnlock_excl(sl: &mount_lock); |
2400 | |
2401 | return err; |
2402 | } |
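/*
 * One cell of the bind table above, from userspace (illustrative,
 * paths hypothetical): a shared source bound onto a non-shared
 * destination, i.e. the "shared (+)" case.
 *
 *	mount(NULL, "/a", NULL, MS_SHARED, NULL);	// source becomes shared
 *	mount("/a", "/b", NULL, MS_BIND, NULL);		// clone joins /a's peer group
 *	// A mount added under /a now propagates to /b and vice versa,
 *	// because the clone at /b is a peer of the mount at /a.
 */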
2403 | |
2404 | /** |
2405 | * do_lock_mount - lock mount and mountpoint |
2406 | * @path: target path |
2407 | * @beneath: whether the intention is to mount beneath @path |
2408 | * |
2409 | * Follow the mount stack on @path until the top mount @mnt is found. If |
2410 | * the initial @path->{mnt,dentry} is a mountpoint lookup the first |
2411 | * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root} |
2412 | * until nothing is stacked on top of it anymore. |
2413 | * |
2414 | * Acquire the inode_lock() on the top mount's ->mnt_root to protect |
2415 | * against concurrent removal of the new mountpoint from another mount |
2416 | * namespace. |
2417 | * |
2418 | * If @beneath is requested, the inode_lock() on @mnt's mountpoint
2419 | * @mp on @mnt->mnt_parent must be acquired instead. This protects against a
2420 | * concurrent unlink of @mp->mnt_dentry from another mount namespace |
2421 | * where @mnt doesn't have a child mount mounted @mp. A concurrent |
2422 | * removal of @mnt->mnt_root doesn't matter as nothing will be mounted |
2423 | * on top of it for @beneath. |
2424 | * |
2425 | * In addition, @beneath needs to make sure that @mnt hasn't been |
2426 | * unmounted or moved from its current mountpoint in between dropping |
2427 | * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt |
2428 | * being unmounted would be detected later, e.g., by calling
2429 | * check_mnt(mnt) in the function it's called from. For the @beneath |
2430 | * case however, it's useful to detect it directly in do_lock_mount(). |
2431 | * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points |
2432 | * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will |
2433 | * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL. |
2434 | * |
2435 | * Return: Either the target mountpoint on the top mount or the top |
2436 | * mount's mountpoint. |
2437 | */ |
2438 | static struct mountpoint *do_lock_mount(struct path *path, bool beneath) |
2439 | { |
2440 | struct vfsmount *mnt = path->mnt; |
2441 | struct dentry *dentry; |
2442 | struct mountpoint *mp = ERR_PTR(error: -ENOENT); |
2443 | |
2444 | for (;;) { |
2445 | struct mount *m; |
2446 | |
2447 | if (beneath) { |
2448 | m = real_mount(mnt); |
2449 | read_seqlock_excl(sl: &mount_lock); |
2450 | dentry = dget(dentry: m->mnt_mountpoint); |
2451 | read_sequnlock_excl(sl: &mount_lock); |
2452 | } else { |
2453 | dentry = path->dentry; |
2454 | } |
2455 | |
2456 | inode_lock(inode: dentry->d_inode); |
2457 | if (unlikely(cant_mount(dentry))) { |
2458 | inode_unlock(inode: dentry->d_inode); |
2459 | goto out; |
2460 | } |
2461 | |
2462 | namespace_lock(); |
2463 | |
2464 | if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) { |
2465 | namespace_unlock(); |
2466 | inode_unlock(inode: dentry->d_inode); |
2467 | goto out; |
2468 | } |
2469 | |
2470 | mnt = lookup_mnt(path); |
2471 | if (likely(!mnt)) |
2472 | break; |
2473 | |
2474 | namespace_unlock(); |
2475 | inode_unlock(inode: dentry->d_inode); |
2476 | if (beneath) |
2477 | dput(dentry); |
2478 | path_put(path); |
2479 | path->mnt = mnt; |
2480 | path->dentry = dget(dentry: mnt->mnt_root); |
2481 | } |
2482 | |
2483 | mp = get_mountpoint(dentry); |
2484 | if (IS_ERR(ptr: mp)) { |
2485 | namespace_unlock(); |
2486 | inode_unlock(inode: dentry->d_inode); |
2487 | } |
2488 | |
2489 | out: |
2490 | if (beneath) |
2491 | dput(dentry); |
2492 | |
2493 | return mp; |
2494 | } |
2495 | |
2496 | static inline struct mountpoint *lock_mount(struct path *path) |
2497 | { |
2498 | return do_lock_mount(path, beneath: false); |
2499 | } |
2500 | |
2501 | static void unlock_mount(struct mountpoint *where) |
2502 | { |
2503 | struct dentry *dentry = where->m_dentry; |
2504 | |
2505 | read_seqlock_excl(sl: &mount_lock); |
2506 | put_mountpoint(mp: where); |
2507 | read_sequnlock_excl(sl: &mount_lock); |
2508 | |
2509 | namespace_unlock(); |
2510 | inode_unlock(inode: dentry->d_inode); |
2511 | } |
2512 | |
2513 | static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) |
2514 | { |
2515 | if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER) |
2516 | return -EINVAL; |
2517 | |
2518 | if (d_is_dir(dentry: mp->m_dentry) != |
2519 | d_is_dir(dentry: mnt->mnt.mnt_root)) |
2520 | return -ENOTDIR; |
2521 | |
2522 | return attach_recursive_mnt(source_mnt: mnt, top_mnt: p, dest_mp: mp, flags: 0); |
2523 | } |
2524 | |
2525 | /* |
2526 | * Sanity check the flags to change_mnt_propagation. |
2527 | */ |
2528 | |
2529 | static int flags_to_propagation_type(int ms_flags) |
2530 | { |
2531 | int type = ms_flags & ~(MS_REC | MS_SILENT); |
2532 | |
2533 | /* Fail if any non-propagation flags are set */ |
2534 | if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) |
2535 | return 0; |
2536 | /* Only one propagation flag should be set */ |
2537 | if (!is_power_of_2(n: type)) |
2538 | return 0; |
2539 | return type; |
2540 | } |
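/*
 * Worked examples (editorial): MS_SHARED | MS_REC and MS_PRIVATE |
 * MS_SILENT each reduce to one valid propagation type; MS_SHARED |
 * MS_SLAVE fails the power-of-two test, and MS_SHARED | MS_RDONLY
 * fails the stray-flag test, so both make the caller return -EINVAL.
 */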
2541 | |
2542 | /* |
2543 | * recursively change the type of the mountpoint. |
2544 | */ |
2545 | static int do_change_type(struct path *path, int ms_flags) |
2546 | { |
2547 | struct mount *m; |
2548 | struct mount *mnt = real_mount(mnt: path->mnt); |
2549 | int recurse = ms_flags & MS_REC; |
2550 | int type; |
2551 | int err = 0; |
2552 | |
2553 | if (!path_mounted(path)) |
2554 | return -EINVAL; |
2555 | |
2556 | type = flags_to_propagation_type(ms_flags); |
2557 | if (!type) |
2558 | return -EINVAL; |
2559 | |
2560 | namespace_lock(); |
2561 | if (type == MS_SHARED) { |
2562 | err = invent_group_ids(mnt, recurse); |
2563 | if (err) |
2564 | goto out_unlock; |
2565 | } |
2566 | |
2567 | lock_mount_hash(); |
2568 | for (m = mnt; m; m = (recurse ? next_mnt(p: m, root: mnt) : NULL)) |
2569 | change_mnt_propagation(m, type); |
2570 | unlock_mount_hash(); |
2571 | |
2572 | out_unlock: |
2573 | namespace_unlock(); |
2574 | return err; |
2575 | } |
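/*
 * Userspace trigger for this path (illustrative): a propagation change
 * is requested with a NULL source and fstype plus one MS_* propagation
 * flag, optionally MS_REC:
 *
 *	// the equivalent of "mount --make-rshared /":
 *	mount(NULL, "/", NULL, MS_SHARED | MS_REC, NULL);
 */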
2576 | |
2577 | static struct mount *__do_loopback(struct path *old_path, int recurse) |
2578 | { |
2579 | struct mount *mnt = ERR_PTR(error: -EINVAL), *old = real_mount(mnt: old_path->mnt); |
2580 | |
2581 | if (IS_MNT_UNBINDABLE(old)) |
2582 | return mnt; |
2583 | |
2584 | if (!check_mnt(mnt: old) && old_path->dentry->d_op != &ns_dentry_operations) |
2585 | return mnt; |
2586 | |
2587 | if (!recurse && has_locked_children(mnt: old, dentry: old_path->dentry)) |
2588 | return mnt; |
2589 | |
2590 | if (recurse) |
2591 | mnt = copy_tree(mnt: old, dentry: old_path->dentry, CL_COPY_MNT_NS_FILE); |
2592 | else |
2593 | mnt = clone_mnt(old, root: old_path->dentry, flag: 0); |
2594 | |
2595 | if (!IS_ERR(ptr: mnt)) |
2596 | mnt->mnt.mnt_flags &= ~MNT_LOCKED; |
2597 | |
2598 | return mnt; |
2599 | } |
2600 | |
2601 | /* |
2602 | * do loopback mount. |
2603 | */ |
2604 | static int do_loopback(struct path *path, const char *old_name, |
2605 | int recurse) |
2606 | { |
2607 | struct path old_path; |
2608 | struct mount *mnt = NULL, *parent; |
2609 | struct mountpoint *mp; |
2610 | int err; |
2611 | if (!old_name || !*old_name) |
2612 | return -EINVAL; |
2613 | err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); |
2614 | if (err) |
2615 | return err; |
2616 | |
2617 | err = -EINVAL; |
2618 | if (mnt_ns_loop(dentry: old_path.dentry)) |
2619 | goto out; |
2620 | |
2621 | mp = lock_mount(path); |
2622 | if (IS_ERR(ptr: mp)) { |
2623 | err = PTR_ERR(ptr: mp); |
2624 | goto out; |
2625 | } |
2626 | |
2627 | parent = real_mount(mnt: path->mnt); |
2628 | if (!check_mnt(mnt: parent)) |
2629 | goto out2; |
2630 | |
2631 | mnt = __do_loopback(old_path: &old_path, recurse); |
2632 | if (IS_ERR(ptr: mnt)) { |
2633 | err = PTR_ERR(ptr: mnt); |
2634 | goto out2; |
2635 | } |
2636 | |
2637 | err = graft_tree(mnt, p: parent, mp); |
2638 | if (err) { |
2639 | lock_mount_hash(); |
2640 | umount_tree(mnt, how: UMOUNT_SYNC); |
2641 | unlock_mount_hash(); |
2642 | } |
2643 | out2: |
2644 | unlock_mount(where: mp); |
2645 | out: |
2646 | path_put(&old_path); |
2647 | return err; |
2648 | } |
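/*
 * Userspace equivalents (illustrative, paths hypothetical):
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);		// recurse == 0
 *	mount("/src", "/dst", NULL, MS_BIND | MS_REC, NULL);	// recurse != 0
 *
 * The non-recursive form is refused when /src has MNT_LOCKED children
 * (see has_locked_children() above), since binding only the top mount
 * would reveal what those children were hiding.
 */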
2649 | |
2650 | static struct file *open_detached_copy(struct path *path, bool recursive) |
2651 | { |
2652 | struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; |
2653 | struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true); |
2654 | struct mount *mnt, *p; |
2655 | struct file *file; |
2656 | |
2657 | if (IS_ERR(ptr: ns)) |
2658 | return ERR_CAST(ptr: ns); |
2659 | |
2660 | namespace_lock(); |
2661 | mnt = __do_loopback(old_path: path, recurse: recursive); |
2662 | if (IS_ERR(ptr: mnt)) { |
2663 | namespace_unlock(); |
2664 | free_mnt_ns(ns); |
2665 | return ERR_CAST(ptr: mnt); |
2666 | } |
2667 | |
2668 | lock_mount_hash(); |
2669 | for (p = mnt; p; p = next_mnt(p, root: mnt)) { |
2670 | p->mnt_ns = ns; |
2671 | ns->mounts++; |
2672 | } |
2673 | ns->root = mnt; |
2674 | list_add_tail(new: &ns->list, head: &mnt->mnt_list); |
2675 | mntget(&mnt->mnt); |
2676 | unlock_mount_hash(); |
2677 | namespace_unlock(); |
2678 | |
2679 | mntput(path->mnt); |
2680 | path->mnt = &mnt->mnt; |
2681 | file = dentry_open(path, O_PATH, current_cred()); |
2682 | if (IS_ERR(ptr: file)) |
2683 | dissolve_on_fput(mnt: path->mnt); |
2684 | else |
2685 | file->f_mode |= FMODE_NEED_UNMOUNT; |
2686 | return file; |
2687 | } |
2688 | |
2689 | SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags) |
2690 | { |
2691 | struct file *file; |
2692 | struct path path; |
2693 | int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; |
2694 | bool detached = flags & OPEN_TREE_CLONE; |
2695 | int error; |
2696 | int fd; |
2697 | |
2698 | BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC); |
2699 | |
2700 | if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE | |
2701 | AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE | |
2702 | OPEN_TREE_CLOEXEC)) |
2703 | return -EINVAL; |
2704 | |
2705 | if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE) |
2706 | return -EINVAL; |
2707 | |
2708 | if (flags & AT_NO_AUTOMOUNT) |
2709 | lookup_flags &= ~LOOKUP_AUTOMOUNT; |
2710 | if (flags & AT_SYMLINK_NOFOLLOW) |
2711 | lookup_flags &= ~LOOKUP_FOLLOW; |
2712 | if (flags & AT_EMPTY_PATH) |
2713 | lookup_flags |= LOOKUP_EMPTY; |
2714 | |
2715 | if (detached && !may_mount()) |
2716 | return -EPERM; |
2717 | |
2718 | fd = get_unused_fd_flags(flags: flags & O_CLOEXEC); |
2719 | if (fd < 0) |
2720 | return fd; |
2721 | |
2722 | error = user_path_at(dfd, name: filename, flags: lookup_flags, path: &path); |
2723 | if (unlikely(error)) { |
2724 | file = ERR_PTR(error); |
2725 | } else { |
2726 | if (detached) |
2727 | file = open_detached_copy(path: &path, recursive: flags & AT_RECURSIVE); |
2728 | else |
2729 | file = dentry_open(path: &path, O_PATH, current_cred()); |
2730 | path_put(&path); |
2731 | } |
2732 | if (IS_ERR(ptr: file)) { |
2733 | put_unused_fd(fd); |
2734 | return PTR_ERR(ptr: file); |
2735 | } |
2736 | fd_install(fd, file); |
2737 | return fd; |
2738 | } |
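/*
 * Typical userspace pairing for OPEN_TREE_CLONE (an illustrative
 * sketch; raw syscall(2) is used on the assumption that no libc
 * wrapper is available, and error handling is trimmed):
 *
 *	int fd = syscall(SYS_open_tree, AT_FDCWD, "/src",
 *			 OPEN_TREE_CLONE | AT_RECURSIVE | OPEN_TREE_CLOEXEC);
 *	// fd refers to a detached copy of the /src subtree; thanks to
 *	// FMODE_NEED_UNMOUNT it dissolves on the final fput() unless
 *	// it gets attached first, e.g. via move_mount(2):
 *	syscall(SYS_move_mount, fd, "", AT_FDCWD, "/dst",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 *	close(fd);
 */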
2739 | |
2740 | /* |
2741 | * Don't allow locked mount flags to be cleared. |
2742 | * |
2743 | * No locks need to be held here while testing the various MNT_LOCK |
2744 | * flags because those flags can never be cleared once they are set. |
2745 | */ |
2746 | static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags) |
2747 | { |
2748 | unsigned int fl = mnt->mnt.mnt_flags; |
2749 | |
2750 | if ((fl & MNT_LOCK_READONLY) && |
2751 | !(mnt_flags & MNT_READONLY)) |
2752 | return false; |
2753 | |
2754 | if ((fl & MNT_LOCK_NODEV) && |
2755 | !(mnt_flags & MNT_NODEV)) |
2756 | return false; |
2757 | |
2758 | if ((fl & MNT_LOCK_NOSUID) && |
2759 | !(mnt_flags & MNT_NOSUID)) |
2760 | return false; |
2761 | |
2762 | if ((fl & MNT_LOCK_NOEXEC) && |
2763 | !(mnt_flags & MNT_NOEXEC)) |
2764 | return false; |
2765 | |
2766 | if ((fl & MNT_LOCK_ATIME) && |
2767 | ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) |
2768 | return false; |
2769 | |
2770 | return true; |
2771 | } |
2772 | |
2773 | static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags) |
2774 | { |
2775 | bool readonly_request = (mnt_flags & MNT_READONLY); |
2776 | |
2777 | if (readonly_request == __mnt_is_readonly(&mnt->mnt)) |
2778 | return 0; |
2779 | |
2780 | if (readonly_request) |
2781 | return mnt_make_readonly(mnt); |
2782 | |
2783 | mnt->mnt.mnt_flags &= ~MNT_READONLY; |
2784 | return 0; |
2785 | } |
2786 | |
2787 | static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) |
2788 | { |
2789 | mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; |
2790 | mnt->mnt.mnt_flags = mnt_flags; |
2791 | touch_mnt_namespace(ns: mnt->mnt_ns); |
2792 | } |
2793 | |
2794 | static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt) |
2795 | { |
2796 | struct super_block *sb = mnt->mnt_sb; |
2797 | |
2798 | if (!__mnt_is_readonly(mnt) && |
2799 | (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && |
2800 | (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { |
2801 | char *buf = (char *)__get_free_page(GFP_KERNEL); |
2802 | char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(error: -ENOMEM); |
2803 | |
2804 | pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n" , |
2805 | sb->s_type->name, |
2806 | is_mounted(mnt) ? "remounted" : "mounted" , |
2807 | mntpath, &sb->s_time_max, |
2808 | (unsigned long long)sb->s_time_max); |
2809 | |
2810 | free_page((unsigned long)buf); |
2811 | sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; |
2812 | } |
2813 | } |
2814 | |
2815 | /* |
2816 | * Handle reconfiguration of the mountpoint only, without altering the
2817 | * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND |
2818 | * to mount(2). |
2819 | */ |
2820 | static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags) |
2821 | { |
2822 | struct super_block *sb = path->mnt->mnt_sb; |
2823 | struct mount *mnt = real_mount(mnt: path->mnt); |
2824 | int ret; |
2825 | |
2826 | if (!check_mnt(mnt)) |
2827 | return -EINVAL; |
2828 | |
2829 | if (!path_mounted(path)) |
2830 | return -EINVAL; |
2831 | |
2832 | if (!can_change_locked_flags(mnt, mnt_flags)) |
2833 | return -EPERM; |
2834 | |
2835 | /* |
2836 | * We're only checking whether the superblock is read-only not |
2837 | * changing it, so only take down_read(&sb->s_umount). |
2838 | */ |
2839 | down_read(sem: &sb->s_umount); |
2840 | lock_mount_hash(); |
2841 | ret = change_mount_ro_state(mnt, mnt_flags); |
2842 | if (ret == 0) |
2843 | set_mount_attributes(mnt, mnt_flags); |
2844 | unlock_mount_hash(); |
2845 | up_read(sem: &sb->s_umount); |
2846 | |
2847 | mnt_warn_timestamp_expiry(mountpoint: path, mnt: &mnt->mnt); |
2848 | |
2849 | return ret; |
2850 | } |
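/*
 * Userspace trigger (illustrative): a read-only bind remount, which
 * changes only this mountpoint's vfsmount flags and never touches the
 * superblock:
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);
 *	mount(NULL, "/dst", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 */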
2851 | |
2852 | /* |
2853 | * change filesystem flags. dir should be a physical root of filesystem. |
2854 | * If you've mounted a non-root directory somewhere and want to do remount |
2855 | * on it - tough luck. |
2856 | */ |
2857 | static int do_remount(struct path *path, int ms_flags, int sb_flags, |
2858 | int mnt_flags, void *data) |
2859 | { |
2860 | int err; |
2861 | struct super_block *sb = path->mnt->mnt_sb; |
2862 | struct mount *mnt = real_mount(mnt: path->mnt); |
2863 | struct fs_context *fc; |
2864 | |
2865 | if (!check_mnt(mnt)) |
2866 | return -EINVAL; |
2867 | |
2868 | if (!path_mounted(path)) |
2869 | return -EINVAL; |
2870 | |
2871 | if (!can_change_locked_flags(mnt, mnt_flags)) |
2872 | return -EPERM; |
2873 | |
2874 | fc = fs_context_for_reconfigure(dentry: path->dentry, sb_flags, MS_RMT_MASK); |
2875 | if (IS_ERR(ptr: fc)) |
2876 | return PTR_ERR(ptr: fc); |
2877 | |
2878 | fc->oldapi = true; |
2879 | err = parse_monolithic_mount_data(fc, data); |
2880 | if (!err) { |
2881 | down_write(sem: &sb->s_umount); |
2882 | err = -EPERM; |
2883 | if (ns_capable(ns: sb->s_user_ns, CAP_SYS_ADMIN)) { |
2884 | err = reconfigure_super(fc); |
2885 | if (!err) { |
2886 | lock_mount_hash(); |
2887 | set_mount_attributes(mnt, mnt_flags); |
2888 | unlock_mount_hash(); |
2889 | } |
2890 | } |
2891 | up_write(sem: &sb->s_umount); |
2892 | } |
2893 | |
2894 | mnt_warn_timestamp_expiry(mountpoint: path, mnt: &mnt->mnt); |
2895 | |
2896 | put_fs_context(fc); |
2897 | return err; |
2898 | } |
2899 | |
2900 | static inline int tree_contains_unbindable(struct mount *mnt) |
2901 | { |
2902 | struct mount *p; |
2903 | for (p = mnt; p; p = next_mnt(p, root: mnt)) { |
2904 | if (IS_MNT_UNBINDABLE(p)) |
2905 | return 1; |
2906 | } |
2907 | return 0; |
2908 | } |
2909 | |
2910 | /* |
2911 | * Check that there aren't references to earlier/same mount namespaces in the |
2912 | * specified subtree. Such references can act as pins for mount namespaces |
2913 | * that aren't checked by the mount-cycle checking code, thereby allowing |
2914 | * cycles to be made. |
2915 | */ |
2916 | static bool check_for_nsfs_mounts(struct mount *subtree) |
2917 | { |
2918 | struct mount *p; |
2919 | bool ret = false; |
2920 | |
2921 | lock_mount_hash(); |
2922 | for (p = subtree; p; p = next_mnt(p, root: subtree)) |
2923 | if (mnt_ns_loop(dentry: p->mnt.mnt_root)) |
2924 | goto out; |
2925 | |
2926 | ret = true; |
2927 | out: |
2928 | unlock_mount_hash(); |
2929 | return ret; |
2930 | } |
2931 | |
2932 | static int do_set_group(struct path *from_path, struct path *to_path) |
2933 | { |
2934 | struct mount *from, *to; |
2935 | int err; |
2936 | |
2937 | from = real_mount(mnt: from_path->mnt); |
2938 | to = real_mount(mnt: to_path->mnt); |
2939 | |
2940 | namespace_lock(); |
2941 | |
2942 | err = -EINVAL; |
2943 | /* To and From must be mounted */ |
2944 | if (!is_mounted(mnt: &from->mnt)) |
2945 | goto out; |
2946 | if (!is_mounted(mnt: &to->mnt)) |
2947 | goto out; |
2948 | |
2949 | err = -EPERM; |
2950 | /* We should be allowed to modify mount namespaces of both mounts */ |
2951 | if (!ns_capable(ns: from->mnt_ns->user_ns, CAP_SYS_ADMIN)) |
2952 | goto out; |
2953 | if (!ns_capable(ns: to->mnt_ns->user_ns, CAP_SYS_ADMIN)) |
2954 | goto out; |
2955 | |
2956 | err = -EINVAL; |
2957 | /* To and From paths should be mount roots */ |
2958 | if (!path_mounted(path: from_path)) |
2959 | goto out; |
2960 | if (!path_mounted(path: to_path)) |
2961 | goto out; |
2962 | |
2963 | /* Setting sharing groups is only allowed across same superblock */ |
2964 | if (from->mnt.mnt_sb != to->mnt.mnt_sb) |
2965 | goto out; |
2966 | |
2967 | /* From mount root should be wider than To mount root */ |
2968 | if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root)) |
2969 | goto out; |
2970 | |
2971 | /* From mount should not have locked children in place of To's root */ |
2972 | if (has_locked_children(mnt: from, dentry: to->mnt.mnt_root)) |
2973 | goto out; |
2974 | |
2975 | /* Setting sharing groups is only allowed on private mounts */ |
2976 | if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to)) |
2977 | goto out; |
2978 | |
2979 | /* From should not be private */ |
2980 | if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from)) |
2981 | goto out; |
2982 | |
2983 | if (IS_MNT_SLAVE(from)) { |
2984 | struct mount *m = from->mnt_master; |
2985 | |
2986 | list_add(new: &to->mnt_slave, head: &m->mnt_slave_list); |
2987 | to->mnt_master = m; |
2988 | } |
2989 | |
2990 | if (IS_MNT_SHARED(from)) { |
2991 | to->mnt_group_id = from->mnt_group_id; |
2992 | list_add(new: &to->mnt_share, head: &from->mnt_share); |
2993 | lock_mount_hash(); |
2994 | set_mnt_shared(to); |
2995 | unlock_mount_hash(); |
2996 | } |
2997 | |
2998 | err = 0; |
2999 | out: |
3000 | namespace_unlock(); |
3001 | return err; |
3002 | } |
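/*
 * Userspace trigger (illustrative sketch; both paths must be mount
 * roots and are hypothetical): MOVE_MOUNT_SET_GROUP copies the sharing
 * group of one mount onto another instead of moving anything.
 *
 *	int from = syscall(SYS_open_tree, AT_FDCWD, "/shared/src", 0);
 *	int to   = syscall(SYS_open_tree, AT_FDCWD, "/priv/dst", 0);
 *
 *	syscall(SYS_move_mount, from, "", to, "",
 *		MOVE_MOUNT_SET_GROUP | MOVE_MOUNT_F_EMPTY_PATH |
 *		MOVE_MOUNT_T_EMPTY_PATH);
 */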
3003 | |
3004 | /** |
3005 | * path_overmounted - check if path is overmounted |
3006 | * @path: path to check |
3007 | * |
3008 | * Check if path is overmounted, i.e., if there's a mount on top of |
3009 | * @path->mnt with @path->dentry as mountpoint. |
3010 | * |
3011 | * Context: This function expects namespace_lock() to be held. |
3012 | * Return: If path is overmounted true is returned, false if not. |
3013 | */ |
3014 | static inline bool path_overmounted(const struct path *path) |
3015 | { |
3016 | rcu_read_lock(); |
3017 | if (unlikely(__lookup_mnt(path->mnt, path->dentry))) { |
3018 | rcu_read_unlock(); |
3019 | return true; |
3020 | } |
3021 | rcu_read_unlock(); |
3022 | return false; |
3023 | } |
3024 | |
3025 | /** |
3026 | * can_move_mount_beneath - check that we can mount beneath the top mount |
3027 | * @from: mount to mount beneath |
3028 | * @to: mount under which to mount |
3029 | * |
3030 | * - Make sure that @to->dentry is actually the root of a mount under |
3031 | * which we can mount another mount. |
3032 | * - Make sure that nothing can be mounted beneath the caller's current |
3033 | * root or the rootfs of the namespace. |
3034 | * - Make sure that the caller can unmount the topmost mount, ensuring
3035 | * that the caller could later reveal the underlying mountpoint.
3036 | * - Ensure that nothing has been mounted on top of @from before we |
3037 | * grabbed @namespace_sem to avoid creating pointless shadow mounts. |
3038 | * - Prevent mounting beneath a mount if the propagation relationship |
3039 | * between the source mount, parent mount, and top mount would lead to |
3040 | * nonsensical mount trees. |
3041 | * |
3042 | * Context: This function expects namespace_lock() to be held. |
3043 | * Return: On success 0, and on error a negative error code is returned. |
3044 | */ |
3045 | static int can_move_mount_beneath(const struct path *from, |
3046 | const struct path *to, |
3047 | const struct mountpoint *mp) |
3048 | { |
3049 | struct mount *mnt_from = real_mount(mnt: from->mnt), |
3050 | *mnt_to = real_mount(mnt: to->mnt), |
3051 | *parent_mnt_to = mnt_to->mnt_parent; |
3052 | |
3053 | if (!mnt_has_parent(mnt: mnt_to)) |
3054 | return -EINVAL; |
3055 | |
3056 | if (!path_mounted(path: to)) |
3057 | return -EINVAL; |
3058 | |
3059 | if (IS_MNT_LOCKED(mnt_to)) |
3060 | return -EINVAL; |
3061 | |
3062 | /* Avoid creating shadow mounts during mount propagation. */ |
3063 | if (path_overmounted(path: from)) |
3064 | return -EINVAL; |
3065 | |
3066 | /* |
3067 | * Mounting beneath the rootfs only makes sense when the |
3068 | * semantics of pivot_root(".", ".") are used. |
3069 | */ |
3070 | if (&mnt_to->mnt == current->fs->root.mnt) |
3071 | return -EINVAL; |
3072 | if (parent_mnt_to == current->nsproxy->mnt_ns->root) |
3073 | return -EINVAL; |
3074 | |
3075 | for (struct mount *p = mnt_from; mnt_has_parent(mnt: p); p = p->mnt_parent) |
3076 | if (p == mnt_to) |
3077 | return -EINVAL; |
3078 | |
3079 | /* |
3080 | * If the parent mount propagates to the child mount this would |
3081 | * mean mounting @mnt_from on @mnt_to->mnt_parent and then |
3082 | * propagating a copy @c of @mnt_from on top of @mnt_to. This |
3083 | * defeats the whole purpose of mounting beneath another mount. |
3084 | */ |
3085 | if (propagation_would_overmount(from: parent_mnt_to, to: mnt_to, mp)) |
3086 | return -EINVAL; |
3087 | |
3088 | /* |
3089 | * If @mnt_to->mnt_parent propagates to @mnt_from this would |
3090 | * mean propagating a copy @c of @mnt_from on top of @mnt_from. |
3091 | * Afterwards @mnt_from would be mounted on top of |
3092 | * @mnt_to->mnt_parent and @mnt_to would be unmounted from |
3093 | * @mnt->mnt_parent and remounted on @mnt_from. But since @c is |
3094 | * already mounted on @mnt_from, @mnt_to would ultimately be |
3095 | * remounted on top of @c. Afterwards, @mnt_from would be |
3096 | * covered by a copy @c of @mnt_from and @c would be covered by |
3097 | * @mnt_from itself. This defeats the whole purpose of mounting |
3098 | * @mnt_from beneath @mnt_to. |
3099 | */ |
3100 | if (propagation_would_overmount(from: parent_mnt_to, to: mnt_from, mp)) |
3101 | return -EINVAL; |
3102 | |
3103 | return 0; |
3104 | } |
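/*
 * Userspace trigger (illustrative): mounting beneath the top mount is
 * requested via move_mount(2) with MOVE_MOUNT_BENEATH, e.g. to stage a
 * replacement under a busy mount and reveal it with a lazy umount
 * (from_fd and the path are hypothetical):
 *
 *	syscall(SYS_move_mount, from_fd, "", AT_FDCWD, "/mnt",
 *		MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_BENEATH);
 *	umount2("/mnt", MNT_DETACH);	// exposes the staged mount
 */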
3105 | |
3106 | static int do_move_mount(struct path *old_path, struct path *new_path, |
3107 | bool beneath) |
3108 | { |
3109 | struct mnt_namespace *ns; |
3110 | struct mount *p; |
3111 | struct mount *old; |
3112 | struct mount *parent; |
3113 | struct mountpoint *mp, *old_mp; |
3114 | int err; |
3115 | bool attached; |
3116 | enum mnt_tree_flags_t flags = 0; |
3117 | |
3118 | mp = do_lock_mount(path: new_path, beneath); |
3119 | if (IS_ERR(ptr: mp)) |
3120 | return PTR_ERR(ptr: mp); |
3121 | |
3122 | old = real_mount(mnt: old_path->mnt); |
3123 | p = real_mount(mnt: new_path->mnt); |
3124 | parent = old->mnt_parent; |
3125 | attached = mnt_has_parent(mnt: old); |
3126 | if (attached) |
3127 | flags |= MNT_TREE_MOVE; |
3128 | old_mp = old->mnt_mp; |
3129 | ns = old->mnt_ns; |
3130 | |
3131 | err = -EINVAL; |
3132 | /* The mountpoint must be in our namespace. */ |
3133 | if (!check_mnt(mnt: p)) |
3134 | goto out; |
3135 | |
3136 | /* The thing moved must be mounted... */ |
3137 | if (!is_mounted(mnt: &old->mnt)) |
3138 | goto out; |
3139 | |
3140 | /* ... and either ours or the root of anon namespace */ |
3141 | if (!(attached ? check_mnt(mnt: old) : is_anon_ns(ns))) |
3142 | goto out; |
3143 | |
3144 | if (old->mnt.mnt_flags & MNT_LOCKED) |
3145 | goto out; |
3146 | |
3147 | if (!path_mounted(path: old_path)) |
3148 | goto out; |
3149 | |
3150 | if (d_is_dir(dentry: new_path->dentry) != |
3151 | d_is_dir(dentry: old_path->dentry)) |
3152 | goto out; |
3153 | /* |
3154 | * Don't move a mount residing in a shared parent. |
3155 | */ |
3156 | if (attached && IS_MNT_SHARED(parent)) |
3157 | goto out; |
3158 | |
3159 | if (beneath) { |
3160 | err = can_move_mount_beneath(from: old_path, to: new_path, mp); |
3161 | if (err) |
3162 | goto out; |
3163 | |
3164 | err = -EINVAL; |
3165 | p = p->mnt_parent; |
3166 | flags |= MNT_TREE_BENEATH; |
3167 | } |
3168 | |
3169 | /* |
3170 | * Don't move a mount tree containing unbindable mounts to a destination |
3171 | * mount which is shared. |
3172 | */ |
3173 | if (IS_MNT_SHARED(p) && tree_contains_unbindable(mnt: old)) |
3174 | goto out; |
3175 | err = -ELOOP; |
3176 | if (!check_for_nsfs_mounts(subtree: old)) |
3177 | goto out; |
3178 | for (; mnt_has_parent(mnt: p); p = p->mnt_parent) |
3179 | if (p == old) |
3180 | goto out; |
3181 | |
3182 | err = attach_recursive_mnt(source_mnt: old, top_mnt: real_mount(mnt: new_path->mnt), dest_mp: mp, flags); |
3183 | if (err) |
3184 | goto out; |
3185 | |
3186 | /* if the mount is moved, it should no longer expire
3187 | * automatically */
3188 | list_del_init(entry: &old->mnt_expire); |
3189 | if (attached) |
3190 | put_mountpoint(mp: old_mp); |
3191 | out: |
3192 | unlock_mount(where: mp); |
3193 | if (!err) { |
3194 | if (attached) |
3195 | mntput_no_expire(mnt: parent); |
3196 | else |
3197 | free_mnt_ns(ns); |
3198 | } |
3199 | return err; |
3200 | } |
3201 | |
3202 | static int do_move_mount_old(struct path *path, const char *old_name) |
3203 | { |
3204 | struct path old_path; |
3205 | int err; |
3206 | |
3207 | if (!old_name || !*old_name) |
3208 | return -EINVAL; |
3209 | |
3210 | err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); |
3211 | if (err) |
3212 | return err; |
3213 | |
3214 | err = do_move_mount(old_path: &old_path, new_path: path, beneath: false); |
3215 | path_put(&old_path); |
3216 | return err; |
3217 | } |
3218 | |
3219 | /* |
3220 | * add a mount into a namespace's mount tree |
3221 | */ |
3222 | static int do_add_mount(struct mount *newmnt, struct mountpoint *mp, |
3223 | const struct path *path, int mnt_flags) |
3224 | { |
3225 | struct mount *parent = real_mount(mnt: path->mnt); |
3226 | |
3227 | mnt_flags &= ~MNT_INTERNAL_FLAGS; |
3228 | |
3229 | if (unlikely(!check_mnt(parent))) { |
3230 | /* that's acceptable only for automounts done in private ns */ |
3231 | if (!(mnt_flags & MNT_SHRINKABLE)) |
3232 | return -EINVAL; |
3233 | /* ... and for those we'd better have mountpoint still alive */ |
3234 | if (!parent->mnt_ns) |
3235 | return -EINVAL; |
3236 | } |
3237 | |
3238 | /* Refuse the same filesystem on the same mount point */ |
3239 | if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path_mounted(path)) |
3240 | return -EBUSY; |
3241 | |
3242 | if (d_is_symlink(dentry: newmnt->mnt.mnt_root)) |
3243 | return -EINVAL; |
3244 | |
3245 | newmnt->mnt.mnt_flags = mnt_flags; |
3246 | return graft_tree(mnt: newmnt, p: parent, mp); |
3247 | } |
3248 | |
3249 | static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags); |
3250 | |
3251 | /* |
3252 | * Create a new mount using a superblock configuration and request it |
3253 | * be added to the namespace tree. |
3254 | */ |
3255 | static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint, |
3256 | unsigned int mnt_flags) |
3257 | { |
3258 | struct vfsmount *mnt; |
3259 | struct mountpoint *mp; |
3260 | struct super_block *sb = fc->root->d_sb; |
3261 | int error; |
3262 | |
3263 | error = security_sb_kern_mount(sb); |
3264 | if (!error && mount_too_revealing(sb, new_mnt_flags: &mnt_flags)) |
3265 | error = -EPERM; |
3266 | |
3267 | if (unlikely(error)) { |
3268 | fc_drop_locked(fc); |
3269 | return error; |
3270 | } |
3271 | |
3272 | up_write(sem: &sb->s_umount); |
3273 | |
3274 | mnt = vfs_create_mount(fc); |
3275 | if (IS_ERR(ptr: mnt)) |
3276 | return PTR_ERR(ptr: mnt); |
3277 | |
3278 | mnt_warn_timestamp_expiry(mountpoint, mnt); |
3279 | |
3280 | mp = lock_mount(path: mountpoint); |
3281 | if (IS_ERR(ptr: mp)) { |
3282 | mntput(mnt); |
3283 | return PTR_ERR(ptr: mp); |
3284 | } |
3285 | error = do_add_mount(newmnt: real_mount(mnt), mp, path: mountpoint, mnt_flags); |
3286 | unlock_mount(where: mp); |
3287 | if (error < 0) |
3288 | mntput(mnt); |
3289 | return error; |
3290 | } |
3291 | |
3292 | /* |
3293 | * create a new mount for userspace and request it to be added into the |
3294 | * namespace's tree |
3295 | */ |
3296 | static int do_new_mount(struct path *path, const char *fstype, int sb_flags, |
3297 | int mnt_flags, const char *name, void *data) |
3298 | { |
3299 | struct file_system_type *type; |
3300 | struct fs_context *fc; |
3301 | const char *subtype = NULL; |
3302 | int err = 0; |
3303 | |
3304 | if (!fstype) |
3305 | return -EINVAL; |
3306 | |
3307 | type = get_fs_type(name: fstype); |
3308 | if (!type) |
3309 | return -ENODEV; |
3310 | |
3311 | if (type->fs_flags & FS_HAS_SUBTYPE) { |
3312 | subtype = strchr(fstype, '.'); |
3313 | if (subtype) { |
3314 | subtype++; |
3315 | if (!*subtype) { |
3316 | put_filesystem(fs: type); |
3317 | return -EINVAL; |
3318 | } |
3319 | } |
3320 | } |
3321 | |
3322 | fc = fs_context_for_mount(fs_type: type, sb_flags); |
3323 | put_filesystem(fs: type); |
3324 | if (IS_ERR(ptr: fc)) |
3325 | return PTR_ERR(ptr: fc); |
3326 | |
3327 | if (subtype) |
3328 | err = vfs_parse_fs_string(fc, key: "subtype" , |
3329 | value: subtype, strlen(subtype)); |
3330 | if (!err && name) |
3331 | err = vfs_parse_fs_string(fc, key: "source" , value: name, strlen(name)); |
3332 | if (!err) |
3333 | err = parse_monolithic_mount_data(fc, data); |
3334 | if (!err && !mount_capable(fc)) |
3335 | err = -EPERM; |
3336 | if (!err) |
3337 | err = vfs_get_tree(fc); |
3338 | if (!err) |
3339 | err = do_new_mount_fc(fc, mountpoint: path, mnt_flags); |
3340 | |
3341 | put_fs_context(fc); |
3342 | return err; |
3343 | } |
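/*
 * The new mount API expresses the same sequence in separate steps
 * (an illustrative sketch with raw syscalls; device and target are
 * hypothetical and error handling is trimmed):
 *
 *	int fsfd = syscall(SYS_fsopen, "ext4", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
 *		"/dev/sda1", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = syscall(SYS_fsmount, fsfd, 0, 0);
 *	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 */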
3344 | |
3345 | int finish_automount(struct vfsmount *m, const struct path *path) |
3346 | { |
3347 | struct dentry *dentry = path->dentry; |
3348 | struct mountpoint *mp; |
3349 | struct mount *mnt; |
3350 | int err; |
3351 | |
3352 | if (!m) |
3353 | return 0; |
3354 | if (IS_ERR(ptr: m)) |
3355 | return PTR_ERR(ptr: m); |
3356 | |
3357 | mnt = real_mount(mnt: m); |
3358 | /* The new mount record should have at least 2 refs to prevent it from
3359 | * being expired before we get a chance to add it
3360 | */ |
3361 | BUG_ON(mnt_get_count(mnt) < 2); |
3362 | |
3363 | if (m->mnt_sb == path->mnt->mnt_sb && |
3364 | m->mnt_root == dentry) { |
3365 | err = -ELOOP; |
3366 | goto discard; |
3367 | } |
3368 | |
3369 | /* |
3370 | * we don't want to use lock_mount() - in this case finding something |
3371 | * that overmounts our mountpoint means "quietly drop what we've
3372 | * got", not "try to mount it on top". |
3373 | */ |
3374 | inode_lock(inode: dentry->d_inode); |
3375 | namespace_lock(); |
3376 | if (unlikely(cant_mount(dentry))) { |
3377 | err = -ENOENT; |
3378 | goto discard_locked; |
3379 | } |
3380 | if (path_overmounted(path)) { |
3381 | err = 0; |
3382 | goto discard_locked; |
3383 | } |
3384 | mp = get_mountpoint(dentry); |
3385 | if (IS_ERR(ptr: mp)) { |
3386 | err = PTR_ERR(ptr: mp); |
3387 | goto discard_locked; |
3388 | } |
3389 | |
3390 | err = do_add_mount(newmnt: mnt, mp, path, mnt_flags: path->mnt->mnt_flags | MNT_SHRINKABLE); |
3391 | unlock_mount(where: mp); |
3392 | if (unlikely(err)) |
3393 | goto discard; |
3394 | mntput(m); |
3395 | return 0; |
3396 | |
3397 | discard_locked: |
3398 | namespace_unlock(); |
3399 | inode_unlock(inode: dentry->d_inode); |
3400 | discard: |
3401 | /* remove m from any expiration list it may be on */ |
if (!list_empty(&mnt->mnt_expire)) {
namespace_lock();
list_del_init(&mnt->mnt_expire);
3405 | namespace_unlock(); |
3406 | } |
3407 | mntput(m); |
3408 | mntput(m); |
3409 | return err; |
3410 | } |
3411 | |
3412 | /** |
3413 | * mnt_set_expiry - Put a mount on an expiration list |
3414 | * @mnt: The mount to list. |
3415 | * @expiry_list: The list to add the mount to. |
3416 | */ |
3417 | void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) |
3418 | { |
3419 | namespace_lock(); |
3420 | |
list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3422 | |
3423 | namespace_unlock(); |
3424 | } |
3425 | EXPORT_SYMBOL(mnt_set_expiry); |
3426 | |
3427 | /* |
3428 | * process a list of expirable mountpoints with the intent of discarding any |
3429 | * mountpoints that aren't in use and haven't been touched since last we came |
3430 | * here |
3431 | */ |
3432 | void mark_mounts_for_expiry(struct list_head *mounts) |
3433 | { |
3434 | struct mount *mnt, *next; |
3435 | LIST_HEAD(graveyard); |
3436 | |
if (list_empty(mounts))
3438 | return; |
3439 | |
3440 | namespace_lock(); |
3441 | lock_mount_hash(); |
3442 | |
3443 | /* extract from the expiration list every vfsmount that matches the |
3444 | * following criteria: |
3445 | * - only referenced by its parent vfsmount |
3446 | * - still marked for expiry (marked on the last call here; marks are |
3447 | * cleared by mntput()) |
3448 | */ |
3449 | list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { |
3450 | if (!xchg(&mnt->mnt_expiry_mark, 1) || |
3451 | propagate_mount_busy(mnt, 1)) |
3452 | continue; |
list_move(&mnt->mnt_expire, &graveyard);
}
while (!list_empty(&graveyard)) {
mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
touch_mnt_namespace(mnt->mnt_ns);
umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3459 | } |
3460 | unlock_mount_hash(); |
3461 | namespace_unlock(); |
3462 | } |
3463 | |
3464 | EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); |
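
/*
* Illustrative sketch (not taken from any in-tree filesystem): a
* filesystem that auto-expires its submounts would typically put each
* new mount on a private list with mnt_set_expiry() and poke
* mark_mounts_for_expiry() from a periodic worker. A mount survives
* the first pass (which only sets mnt_expiry_mark) and is culled on
* the second if nothing touched it in between. The names below
* (example_expiry_*) are made up for the sketch:
*
*	static void example_expiry_workfn(struct work_struct *work);
*	static LIST_HEAD(example_expiry_list);
*	static DECLARE_DELAYED_WORK(example_expiry_work, example_expiry_workfn);
*
*	static void example_expiry_workfn(struct work_struct *work)
*	{
*		mark_mounts_for_expiry(&example_expiry_list);
*		if (!list_empty(&example_expiry_list))
*			schedule_delayed_work(&example_expiry_work, 30 * HZ);
*	}
*
*	// after creating an automounted submount @mnt:
*	mnt_set_expiry(mnt, &example_expiry_list);
*	schedule_delayed_work(&example_expiry_work, 30 * HZ);
*/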
3465 | |
3466 | /* |
3467 | * Ripoff of 'select_parent()' |
3468 | * |
3469 | * search the list of submounts for a given mountpoint, and move any |
3470 | * shrinkable submounts to the 'graveyard' list. |
3471 | */ |
3472 | static int select_submounts(struct mount *parent, struct list_head *graveyard) |
3473 | { |
3474 | struct mount *this_parent = parent; |
3475 | struct list_head *next; |
3476 | int found = 0; |
3477 | |
3478 | repeat: |
3479 | next = this_parent->mnt_mounts.next; |
3480 | resume: |
3481 | while (next != &this_parent->mnt_mounts) { |
3482 | struct list_head *tmp = next; |
3483 | struct mount *mnt = list_entry(tmp, struct mount, mnt_child); |
3484 | |
3485 | next = tmp->next; |
3486 | if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) |
3487 | continue; |
3488 | /* |
* Descend a level if the mnt_mounts list is non-empty.
3490 | */ |
if (!list_empty(&mnt->mnt_mounts)) {
3492 | this_parent = mnt; |
3493 | goto repeat; |
3494 | } |
3495 | |
3496 | if (!propagate_mount_busy(mnt, 1)) { |
list_move_tail(&mnt->mnt_expire, graveyard);
3498 | found++; |
3499 | } |
3500 | } |
3501 | /* |
3502 | * All done at this level ... ascend and resume the search |
3503 | */ |
3504 | if (this_parent != parent) { |
3505 | next = this_parent->mnt_child.next; |
3506 | this_parent = this_parent->mnt_parent; |
3507 | goto resume; |
3508 | } |
3509 | return found; |
3510 | } |
3511 | |
3512 | /* |
3513 | * process a list of expirable mountpoints with the intent of discarding any |
3514 | * submounts of a specific parent mountpoint |
3515 | * |
3516 | * mount_lock must be held for write |
3517 | */ |
3518 | static void shrink_submounts(struct mount *mnt) |
3519 | { |
3520 | LIST_HEAD(graveyard); |
3521 | struct mount *m; |
3522 | |
3523 | /* extract submounts of 'mountpoint' from the expiration list */ |
while (select_submounts(mnt, &graveyard)) {
while (!list_empty(&graveyard)) {
m = list_first_entry(&graveyard, struct mount,
mnt_expire);
touch_mnt_namespace(m->mnt_ns);
umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3530 | } |
3531 | } |
3532 | } |
3533 | |
static void *copy_mount_options(const void __user *data)
3535 | { |
3536 | char *copy; |
3537 | unsigned left, offset; |
3538 | |
3539 | if (!data) |
3540 | return NULL; |
3541 | |
3542 | copy = kmalloc(PAGE_SIZE, GFP_KERNEL); |
3543 | if (!copy) |
return ERR_PTR(-ENOMEM);

left = copy_from_user(copy, data, PAGE_SIZE);
3547 | |
3548 | /* |
3549 | * Not all architectures have an exact copy_from_user(). Resort to |
3550 | * byte at a time. |
3551 | */ |
3552 | offset = PAGE_SIZE - left; |
3553 | while (left) { |
3554 | char c; |
3555 | if (get_user(c, (const char __user *)data + offset)) |
3556 | break; |
3557 | copy[offset] = c; |
3558 | left--; |
3559 | offset++; |
3560 | } |
3561 | |
3562 | if (left == PAGE_SIZE) { |
kfree(copy);
return ERR_PTR(-EFAULT);
3565 | } |
3566 | |
3567 | return copy; |
3568 | } |
3569 | |
3570 | static char *copy_mount_string(const void __user *data) |
3571 | { |
3572 | return data ? strndup_user(data, PATH_MAX) : NULL; |
3573 | } |
3574 | |
3575 | /* |
3576 | * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to |
3577 | * be given to the mount() call (ie: read-only, no-dev, no-suid etc). |
3578 | * |
3579 | * data is a (void *) that can point to any structure up to |
3580 | * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent |
3581 | * information (or be NULL). |
3582 | * |
3583 | * Pre-0.97 versions of mount() didn't have a flags word. |
3584 | * When the flags word was introduced its top half was required |
3585 | * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. |
3586 | * Therefore, if this magic number is present, it carries no information |
3587 | * and must be discarded. |
3588 | */ |
3589 | int path_mount(const char *dev_name, struct path *path, |
3590 | const char *type_page, unsigned long flags, void *data_page) |
3591 | { |
3592 | unsigned int mnt_flags = 0, sb_flags; |
3593 | int ret; |
3594 | |
3595 | /* Discard magic */ |
3596 | if ((flags & MS_MGC_MSK) == MS_MGC_VAL) |
3597 | flags &= ~MS_MGC_MSK; |
3598 | |
3599 | /* Basic sanity checks */ |
3600 | if (data_page) |
3601 | ((char *)data_page)[PAGE_SIZE - 1] = 0; |
3602 | |
3603 | if (flags & MS_NOUSER) |
3604 | return -EINVAL; |
3605 | |
ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
3607 | if (ret) |
3608 | return ret; |
3609 | if (!may_mount()) |
3610 | return -EPERM; |
3611 | if (flags & SB_MANDLOCK) |
3612 | warn_mandlock(); |
3613 | |
/* Default to relatime unless overridden */
3615 | if (!(flags & MS_NOATIME)) |
3616 | mnt_flags |= MNT_RELATIME; |
3617 | |
3618 | /* Separate the per-mountpoint flags */ |
3619 | if (flags & MS_NOSUID) |
3620 | mnt_flags |= MNT_NOSUID; |
3621 | if (flags & MS_NODEV) |
3622 | mnt_flags |= MNT_NODEV; |
3623 | if (flags & MS_NOEXEC) |
3624 | mnt_flags |= MNT_NOEXEC; |
3625 | if (flags & MS_NOATIME) |
3626 | mnt_flags |= MNT_NOATIME; |
3627 | if (flags & MS_NODIRATIME) |
3628 | mnt_flags |= MNT_NODIRATIME; |
3629 | if (flags & MS_STRICTATIME) |
3630 | mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); |
3631 | if (flags & MS_RDONLY) |
3632 | mnt_flags |= MNT_READONLY; |
3633 | if (flags & MS_NOSYMFOLLOW) |
3634 | mnt_flags |= MNT_NOSYMFOLLOW; |
3635 | |
3636 | /* The default atime for remount is preservation */ |
3637 | if ((flags & MS_REMOUNT) && |
3638 | ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | |
3639 | MS_STRICTATIME)) == 0)) { |
3640 | mnt_flags &= ~MNT_ATIME_MASK; |
3641 | mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK; |
3642 | } |
3643 | |
3644 | sb_flags = flags & (SB_RDONLY | |
3645 | SB_SYNCHRONOUS | |
3646 | SB_MANDLOCK | |
3647 | SB_DIRSYNC | |
3648 | SB_SILENT | |
3649 | SB_POSIXACL | |
3650 | SB_LAZYTIME | |
3651 | SB_I_VERSION); |
3652 | |
3653 | if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND)) |
3654 | return do_reconfigure_mnt(path, mnt_flags); |
3655 | if (flags & MS_REMOUNT) |
return do_remount(path, flags, sb_flags, mnt_flags, data_page);
if (flags & MS_BIND)
return do_loopback(path, dev_name, flags & MS_REC);
if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
return do_change_type(path, flags);
if (flags & MS_MOVE)
return do_move_mount_old(path, dev_name);

return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
data_page);
3666 | } |
3667 | |
3668 | long do_mount(const char *dev_name, const char __user *dir_name, |
3669 | const char *type_page, unsigned long flags, void *data_page) |
3670 | { |
3671 | struct path path; |
3672 | int ret; |
3673 | |
ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
if (ret)
return ret;
ret = path_mount(dev_name, &path, type_page, flags, data_page);
3678 | path_put(&path); |
3679 | return ret; |
3680 | } |
3681 | |
3682 | static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns) |
3683 | { |
return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3685 | } |
3686 | |
3687 | static void dec_mnt_namespaces(struct ucounts *ucounts) |
3688 | { |
dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3690 | } |
3691 | |
3692 | static void free_mnt_ns(struct mnt_namespace *ns) |
3693 | { |
3694 | if (!is_anon_ns(ns)) |
3695 | ns_free_inum(&ns->ns); |
dec_mnt_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
kfree(ns);
3699 | } |
3700 | |
3701 | /* |
3702 | * Assign a sequence number so we can detect when we attempt to bind |
3703 | * mount a reference to an older mount namespace into the current |
3704 | * mount namespace, preventing reference counting loops. A 64bit |
3705 | * number incrementing at 10Ghz will take 12,427 years to wrap which |
3706 | * is effectively never, so we can ignore the possibility. |
3707 | */ |
3708 | static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1); |
3709 | |
3710 | static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon) |
3711 | { |
3712 | struct mnt_namespace *new_ns; |
3713 | struct ucounts *ucounts; |
3714 | int ret; |
3715 | |
ucounts = inc_mnt_namespaces(user_ns);
if (!ucounts)
return ERR_PTR(-ENOSPC);

new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
if (!new_ns) {
dec_mnt_namespaces(ucounts);
return ERR_PTR(-ENOMEM);
}
if (!anon) {
ret = ns_alloc_inum(&new_ns->ns);
if (ret) {
kfree(new_ns);
dec_mnt_namespaces(ucounts);
return ERR_PTR(ret);
3731 | } |
3732 | } |
3733 | new_ns->ns.ops = &mntns_operations; |
3734 | if (!anon) |
new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
refcount_set(&new_ns->ns.count, 1);
INIT_LIST_HEAD(&new_ns->list);
3738 | init_waitqueue_head(&new_ns->poll); |
3739 | spin_lock_init(&new_ns->ns_lock); |
new_ns->user_ns = get_user_ns(user_ns);
3741 | new_ns->ucounts = ucounts; |
3742 | return new_ns; |
3743 | } |
3744 | |
3745 | __latent_entropy |
3746 | struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, |
3747 | struct user_namespace *user_ns, struct fs_struct *new_fs) |
3748 | { |
3749 | struct mnt_namespace *new_ns; |
3750 | struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; |
3751 | struct mount *p, *q; |
3752 | struct mount *old; |
3753 | struct mount *new; |
3754 | int copy_flags; |
3755 | |
3756 | BUG_ON(!ns); |
3757 | |
3758 | if (likely(!(flags & CLONE_NEWNS))) { |
3759 | get_mnt_ns(ns); |
3760 | return ns; |
3761 | } |
3762 | |
3763 | old = ns->root; |
3764 | |
new_ns = alloc_mnt_ns(user_ns, false);
if (IS_ERR(new_ns))
3767 | return new_ns; |
3768 | |
3769 | namespace_lock(); |
3770 | /* First pass: copy the tree topology */ |
3771 | copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; |
3772 | if (user_ns != ns->user_ns) |
3773 | copy_flags |= CL_SHARED_TO_SLAVE; |
new = copy_tree(old, old->mnt.mnt_root, copy_flags);
if (IS_ERR(new)) {
namespace_unlock();
free_mnt_ns(new_ns);
return ERR_CAST(new);
3779 | } |
3780 | if (user_ns != ns->user_ns) { |
3781 | lock_mount_hash(); |
lock_mnt_tree(new);
3783 | unlock_mount_hash(); |
3784 | } |
3785 | new_ns->root = new; |
list_add_tail(&new_ns->list, &new->mnt_list);
3787 | |
3788 | /* |
3789 | * Second pass: switch the tsk->fs->* elements and mark new vfsmounts |
3790 | * as belonging to new namespace. We have already acquired a private |
3791 | * fs_struct, so tsk->fs->lock is not needed. |
3792 | */ |
3793 | p = old; |
3794 | q = new; |
3795 | while (p) { |
3796 | q->mnt_ns = new_ns; |
3797 | new_ns->mounts++; |
3798 | if (new_fs) { |
3799 | if (&p->mnt == new_fs->root.mnt) { |
3800 | new_fs->root.mnt = mntget(&q->mnt); |
3801 | rootmnt = &p->mnt; |
3802 | } |
3803 | if (&p->mnt == new_fs->pwd.mnt) { |
3804 | new_fs->pwd.mnt = mntget(&q->mnt); |
3805 | pwdmnt = &p->mnt; |
3806 | } |
3807 | } |
p = next_mnt(p, old);
q = next_mnt(q, new);
if (!q)
break;
// an mntns binding we'd skipped?
while (p->mnt.mnt_root != q->mnt.mnt_root)
p = next_mnt(skip_mnt_tree(p), old);
3815 | } |
3816 | namespace_unlock(); |
3817 | |
3818 | if (rootmnt) |
3819 | mntput(rootmnt); |
3820 | if (pwdmnt) |
3821 | mntput(pwdmnt); |
3822 | |
3823 | return new_ns; |
3824 | } |
3825 | |
3826 | struct dentry *mount_subtree(struct vfsmount *m, const char *name) |
3827 | { |
struct mount *mnt = real_mount(m);
3829 | struct mnt_namespace *ns; |
3830 | struct super_block *s; |
3831 | struct path path; |
3832 | int err; |
3833 | |
ns = alloc_mnt_ns(&init_user_ns, true);
if (IS_ERR(ns)) {
mntput(m);
return ERR_CAST(ns);
3838 | } |
3839 | mnt->mnt_ns = ns; |
3840 | ns->root = mnt; |
3841 | ns->mounts++; |
list_add(&mnt->mnt_list, &ns->list);
3843 | |
3844 | err = vfs_path_lookup(m->mnt_root, m, |
3845 | name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); |
3846 | |
3847 | put_mnt_ns(ns); |
3848 | |
3849 | if (err) |
return ERR_PTR(err);
3851 | |
3852 | /* trade a vfsmount reference for active sb one */ |
3853 | s = path.mnt->mnt_sb; |
atomic_inc(&s->s_active);
mntput(path.mnt);
/* lock the sucker */
down_write(&s->s_umount);
3858 | /* ... and return the root of (sub)tree on it */ |
3859 | return path.dentry; |
3860 | } |
3861 | EXPORT_SYMBOL(mount_subtree); |
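
/*
* Illustrative in-kernel use (hypothetical caller): a filesystem whose
* mountable root is really a subdirectory of the superblock's root can
* hand its vfsmount to mount_subtree() and return the dentry of the
* wanted subtree from its mount helper:
*
*	struct vfsmount *mnt = vfs_kern_mount(type, 0, name, data);
*	if (IS_ERR(mnt))
*		return ERR_CAST(mnt);
*	return mount_subtree(mnt, "/some/export/path");
*
* Note that mount_subtree() always consumes the vfsmount reference and
* returns either an ERR_PTR() or a dentry with s_active raised and
* s_umount held, as the tail of the function above shows.
*/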
3862 | |
3863 | SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, |
3864 | char __user *, type, unsigned long, flags, void __user *, data) |
3865 | { |
3866 | int ret; |
3867 | char *kernel_type; |
3868 | char *kernel_dev; |
3869 | void *options; |
3870 | |
kernel_type = copy_mount_string(type);
ret = PTR_ERR(kernel_type);
if (IS_ERR(kernel_type))
goto out_type;

kernel_dev = copy_mount_string(dev_name);
ret = PTR_ERR(kernel_dev);
if (IS_ERR(kernel_dev))
goto out_dev;

options = copy_mount_options(data);
ret = PTR_ERR(options);
if (IS_ERR(options))
goto out_data;

ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);

kfree(options);
out_data:
kfree(kernel_dev);
out_dev:
kfree(kernel_type);
3893 | out_type: |
3894 | return ret; |
3895 | } |
3896 | |
3897 | #define FSMOUNT_VALID_FLAGS \ |
3898 | (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \ |
3899 | MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \ |
3900 | MOUNT_ATTR_NOSYMFOLLOW) |
3901 | |
3902 | #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP) |
3903 | |
3904 | #define MOUNT_SETATTR_PROPAGATION_FLAGS \ |
3905 | (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED) |
3906 | |
3907 | static unsigned int attr_flags_to_mnt_flags(u64 attr_flags) |
3908 | { |
3909 | unsigned int mnt_flags = 0; |
3910 | |
3911 | if (attr_flags & MOUNT_ATTR_RDONLY) |
3912 | mnt_flags |= MNT_READONLY; |
3913 | if (attr_flags & MOUNT_ATTR_NOSUID) |
3914 | mnt_flags |= MNT_NOSUID; |
3915 | if (attr_flags & MOUNT_ATTR_NODEV) |
3916 | mnt_flags |= MNT_NODEV; |
3917 | if (attr_flags & MOUNT_ATTR_NOEXEC) |
3918 | mnt_flags |= MNT_NOEXEC; |
3919 | if (attr_flags & MOUNT_ATTR_NODIRATIME) |
3920 | mnt_flags |= MNT_NODIRATIME; |
3921 | if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW) |
3922 | mnt_flags |= MNT_NOSYMFOLLOW; |
3923 | |
3924 | return mnt_flags; |
3925 | } |
3926 | |
3927 | /* |
3928 | * Create a kernel mount representation for a new, prepared superblock |
3929 | * (specified by fs_fd) and attach to an open_tree-like file descriptor. |
3930 | */ |
3931 | SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags, |
3932 | unsigned int, attr_flags) |
3933 | { |
3934 | struct mnt_namespace *ns; |
3935 | struct fs_context *fc; |
3936 | struct file *file; |
3937 | struct path newmount; |
3938 | struct mount *mnt; |
3939 | struct fd f; |
3940 | unsigned int mnt_flags = 0; |
3941 | long ret; |
3942 | |
3943 | if (!may_mount()) |
3944 | return -EPERM; |
3945 | |
3946 | if ((flags & ~(FSMOUNT_CLOEXEC)) != 0) |
3947 | return -EINVAL; |
3948 | |
3949 | if (attr_flags & ~FSMOUNT_VALID_FLAGS) |
3950 | return -EINVAL; |
3951 | |
3952 | mnt_flags = attr_flags_to_mnt_flags(attr_flags); |
3953 | |
3954 | switch (attr_flags & MOUNT_ATTR__ATIME) { |
3955 | case MOUNT_ATTR_STRICTATIME: |
3956 | break; |
3957 | case MOUNT_ATTR_NOATIME: |
3958 | mnt_flags |= MNT_NOATIME; |
3959 | break; |
3960 | case MOUNT_ATTR_RELATIME: |
3961 | mnt_flags |= MNT_RELATIME; |
3962 | break; |
3963 | default: |
3964 | return -EINVAL; |
3965 | } |
3966 | |
f = fdget(fs_fd);
3968 | if (!f.file) |
3969 | return -EBADF; |
3970 | |
3971 | ret = -EINVAL; |
3972 | if (f.file->f_op != &fscontext_fops) |
3973 | goto err_fsfd; |
3974 | |
3975 | fc = f.file->private_data; |
3976 | |
3977 | ret = mutex_lock_interruptible(&fc->uapi_mutex); |
3978 | if (ret < 0) |
3979 | goto err_fsfd; |
3980 | |
3981 | /* There must be a valid superblock or we can't mount it */ |
3982 | ret = -EINVAL; |
3983 | if (!fc->root) |
3984 | goto err_unlock; |
3985 | |
3986 | ret = -EPERM; |
if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
pr_warn("VFS: Mount too revealing\n");
3989 | goto err_unlock; |
3990 | } |
3991 | |
3992 | ret = -EBUSY; |
3993 | if (fc->phase != FS_CONTEXT_AWAITING_MOUNT) |
3994 | goto err_unlock; |
3995 | |
3996 | if (fc->sb_flags & SB_MANDLOCK) |
3997 | warn_mandlock(); |
3998 | |
3999 | newmount.mnt = vfs_create_mount(fc); |
if (IS_ERR(newmount.mnt)) {
ret = PTR_ERR(newmount.mnt);
4002 | goto err_unlock; |
4003 | } |
newmount.dentry = dget(fc->root);
4005 | newmount.mnt->mnt_flags = mnt_flags; |
4006 | |
4007 | /* We've done the mount bit - now move the file context into more or |
4008 | * less the same state as if we'd done an fspick(). We don't want to |
4009 | * do any memory allocation or anything like that at this point as we |
4010 | * don't want to have to handle any errors incurred. |
4011 | */ |
4012 | vfs_clean_context(fc); |
4013 | |
ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
if (IS_ERR(ns)) {
ret = PTR_ERR(ns);
4017 | goto err_path; |
4018 | } |
mnt = real_mount(newmount.mnt);
4020 | mnt->mnt_ns = ns; |
4021 | ns->root = mnt; |
4022 | ns->mounts = 1; |
list_add(&mnt->mnt_list, &ns->list);
4024 | mntget(newmount.mnt); |
4025 | |
4026 | /* Attach to an apparent O_PATH fd with a note that we need to unmount |
4027 | * it, not just simply put it. |
4028 | */ |
file = dentry_open(&newmount, O_PATH, fc->cred);
if (IS_ERR(file)) {
dissolve_on_fput(newmount.mnt);
ret = PTR_ERR(file);
4033 | goto err_path; |
4034 | } |
4035 | file->f_mode |= FMODE_NEED_UNMOUNT; |
4036 | |
ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
if (ret >= 0)
fd_install(ret, file);
4040 | else |
4041 | fput(file); |
4042 | |
4043 | err_path: |
4044 | path_put(&newmount); |
4045 | err_unlock: |
mutex_unlock(&fc->uapi_mutex);
4047 | err_fsfd: |
fdput(f);
4049 | return ret; |
4050 | } |
4051 | |
4052 | /* |
4053 | * Move a mount from one place to another. In combination with |
4054 | * fsopen()/fsmount() this is used to install a new mount and in combination |
4055 | * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy |
4056 | * a mount subtree. |
4057 | * |
4058 | * Note the flags value is a combination of MOVE_MOUNT_* flags. |
4059 | */ |
4060 | SYSCALL_DEFINE5(move_mount, |
4061 | int, from_dfd, const char __user *, from_pathname, |
4062 | int, to_dfd, const char __user *, to_pathname, |
4063 | unsigned int, flags) |
4064 | { |
4065 | struct path from_path, to_path; |
4066 | unsigned int lflags; |
4067 | int ret = 0; |
4068 | |
4069 | if (!may_mount()) |
4070 | return -EPERM; |
4071 | |
4072 | if (flags & ~MOVE_MOUNT__MASK) |
4073 | return -EINVAL; |
4074 | |
4075 | if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) == |
4076 | (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) |
4077 | return -EINVAL; |
4078 | |
4079 | /* If someone gives a pathname, they aren't permitted to move |
4080 | * from an fd that requires unmount as we can't get at the flag |
4081 | * to clear it afterwards. |
4082 | */ |
4083 | lflags = 0; |
4084 | if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW; |
4085 | if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT; |
4086 | if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY; |
4087 | |
ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
4089 | if (ret < 0) |
4090 | return ret; |
4091 | |
4092 | lflags = 0; |
4093 | if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW; |
4094 | if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT; |
4095 | if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY; |
4096 | |
ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
4098 | if (ret < 0) |
4099 | goto out_from; |
4100 | |
ret = security_move_mount(&from_path, &to_path);
4102 | if (ret < 0) |
4103 | goto out_to; |
4104 | |
4105 | if (flags & MOVE_MOUNT_SET_GROUP) |
ret = do_set_group(&from_path, &to_path);
else
ret = do_move_mount(&from_path, &to_path,
(flags & MOVE_MOUNT_BENEATH));
4110 | |
4111 | out_to: |
4112 | path_put(&to_path); |
4113 | out_from: |
4114 | path_put(&from_path); |
4115 | return ret; |
4116 | } |
4117 | |
4118 | /* |
4119 | * Return true if path is reachable from root |
4120 | * |
4121 | * namespace_sem or mount_lock is held |
4122 | */ |
4123 | bool is_path_reachable(struct mount *mnt, struct dentry *dentry, |
4124 | const struct path *root) |
4125 | { |
4126 | while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { |
4127 | dentry = mnt->mnt_mountpoint; |
4128 | mnt = mnt->mnt_parent; |
4129 | } |
4130 | return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); |
4131 | } |
4132 | |
4133 | bool path_is_under(const struct path *path1, const struct path *path2) |
4134 | { |
4135 | bool res; |
read_seqlock_excl(&mount_lock);
res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
read_sequnlock_excl(&mount_lock);
4139 | return res; |
4140 | } |
4141 | EXPORT_SYMBOL(path_is_under); |
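
/*
* Illustrative check (hypothetical caller): refusing to move a mount
* underneath its own subtree could be written as:
*
*	if (path_is_under(&dst, &src))
*		return -EINVAL;
*
* The helper takes mount_lock itself, so the caller only needs to hold
* references to both paths.
*/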
4142 | |
4143 | /* |
4144 | * pivot_root Semantics: |
4145 | * Moves the root file system of the current process to the directory put_old, |
* makes new_root the new root file system of the current process, and sets
4147 | * root/cwd of all processes which had them on the current root to new_root. |
4148 | * |
4149 | * Restrictions: |
4150 | * The new_root and put_old must be directories, and must not be on the |
4151 | * same file system as the current process root. The put_old must be |
4152 | * underneath new_root, i.e. adding a non-zero number of /.. to the string |
4153 | * pointed to by put_old must yield the same directory as new_root. No other |
4154 | * file system may be mounted on put_old. After all, new_root is a mountpoint. |
4155 | * |
4156 | * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. |
4157 | * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives |
4158 | * in this situation. |
4159 | * |
4160 | * Notes: |
4161 | * - we don't move root/cwd if they are not at the root (reason: if something |
4162 | * cared enough to change them, it's probably wrong to force them elsewhere) |
4163 | * - it's okay to pick a root that isn't the root of a file system, e.g. |
4164 | * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, |
4165 | * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root |
4166 | * first. |
4167 | */ |
4168 | SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, |
4169 | const char __user *, put_old) |
4170 | { |
4171 | struct path new, old, root; |
4172 | struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent; |
4173 | struct mountpoint *old_mp, *root_mp; |
4174 | int error; |
4175 | |
4176 | if (!may_mount()) |
4177 | return -EPERM; |
4178 | |
error = user_path_at(AT_FDCWD, new_root,
LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
4181 | if (error) |
4182 | goto out0; |
4183 | |
error = user_path_at(AT_FDCWD, put_old,
LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
4186 | if (error) |
4187 | goto out1; |
4188 | |
error = security_sb_pivotroot(&old, &new);
4190 | if (error) |
4191 | goto out2; |
4192 | |
get_fs_root(current->fs, &root);
old_mp = lock_mount(&old);
error = PTR_ERR(old_mp);
if (IS_ERR(old_mp))
goto out3;
4198 | |
4199 | error = -EINVAL; |
new_mnt = real_mount(new.mnt);
root_mnt = real_mount(root.mnt);
old_mnt = real_mount(old.mnt);
4203 | ex_parent = new_mnt->mnt_parent; |
4204 | root_parent = root_mnt->mnt_parent; |
4205 | if (IS_MNT_SHARED(old_mnt) || |
4206 | IS_MNT_SHARED(ex_parent) || |
4207 | IS_MNT_SHARED(root_parent)) |
4208 | goto out4; |
if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
4210 | goto out4; |
4211 | if (new_mnt->mnt.mnt_flags & MNT_LOCKED) |
4212 | goto out4; |
4213 | error = -ENOENT; |
if (d_unlinked(new.dentry))
4215 | goto out4; |
4216 | error = -EBUSY; |
4217 | if (new_mnt == root_mnt || old_mnt == root_mnt) |
4218 | goto out4; /* loop, on the same file system */ |
4219 | error = -EINVAL; |
if (!path_mounted(&root))
goto out4; /* not a mountpoint */
if (!mnt_has_parent(root_mnt))
goto out4; /* not attached */
if (!path_mounted(&new))
goto out4; /* not a mountpoint */
if (!mnt_has_parent(new_mnt))
goto out4; /* not attached */
/* make sure we can reach put_old from new_root */
if (!is_path_reachable(old_mnt, old.dentry, &new))
goto out4;
/* make certain new is below the root */
if (!is_path_reachable(new_mnt, new.dentry, &root))
goto out4;
lock_mount_hash();
umount_mnt(new_mnt);
root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
4237 | if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { |
4238 | new_mnt->mnt.mnt_flags |= MNT_LOCKED; |
4239 | root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; |
4240 | } |
4241 | /* mount old root on put_old */ |
attach_mnt(root_mnt, old_mnt, old_mp, false);
/* mount new_root on / */
attach_mnt(new_mnt, root_parent, root_mp, false);
mnt_add_count(root_parent, -1);
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
put_mountpoint(root_mp);
4250 | unlock_mount_hash(); |
4251 | chroot_fs_refs(&root, &new); |
4252 | error = 0; |
4253 | out4: |
unlock_mount(old_mp);
if (!error)
mntput_no_expire(ex_parent);
4257 | out3: |
4258 | path_put(&root); |
4259 | out2: |
4260 | path_put(&old); |
4261 | out1: |
4262 | path_put(&new); |
4263 | out0: |
4264 | return error; |
4265 | } |
4266 | |
4267 | static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt) |
4268 | { |
4269 | unsigned int flags = mnt->mnt.mnt_flags; |
4270 | |
4271 | /* flags to clear */ |
4272 | flags &= ~kattr->attr_clr; |
4273 | /* flags to raise */ |
4274 | flags |= kattr->attr_set; |
4275 | |
4276 | return flags; |
4277 | } |
4278 | |
4279 | static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt) |
4280 | { |
4281 | struct vfsmount *m = &mnt->mnt; |
4282 | struct user_namespace *fs_userns = m->mnt_sb->s_user_ns; |
4283 | |
4284 | if (!kattr->mnt_idmap) |
4285 | return 0; |
4286 | |
4287 | /* |
4288 | * Creating an idmapped mount with the filesystem wide idmapping |
4289 | * doesn't make sense so block that. We don't allow mushy semantics. |
4290 | */ |
if (!check_fsmapping(kattr->mnt_idmap, m->mnt_sb))
4292 | return -EINVAL; |
4293 | |
4294 | /* |
4295 | * Once a mount has been idmapped we don't allow it to change its |
4296 | * mapping. It makes things simpler and callers can just create |
4297 | * another bind-mount they can idmap if they want to. |
4298 | */ |
if (is_idmapped_mnt(m))
4300 | return -EPERM; |
4301 | |
4302 | /* The underlying filesystem doesn't support idmapped mounts yet. */ |
4303 | if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP)) |
4304 | return -EINVAL; |
4305 | |
4306 | /* We're not controlling the superblock. */ |
if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
4308 | return -EPERM; |
4309 | |
4310 | /* Mount has already been visible in the filesystem hierarchy. */ |
if (!is_anon_ns(mnt->mnt_ns))
4312 | return -EINVAL; |
4313 | |
4314 | return 0; |
4315 | } |
4316 | |
4317 | /** |
4318 | * mnt_allow_writers() - check whether the attribute change allows writers |
4319 | * @kattr: the new mount attributes |
4320 | * @mnt: the mount to which @kattr will be applied |
4321 | * |
* Check whether the new mount attributes in @kattr allow concurrent writers.
*
* Return: true if concurrent writers are allowed, false if they need to
* be held off.
4325 | */ |
4326 | static inline bool mnt_allow_writers(const struct mount_kattr *kattr, |
4327 | const struct mount *mnt) |
4328 | { |
4329 | return (!(kattr->attr_set & MNT_READONLY) || |
4330 | (mnt->mnt.mnt_flags & MNT_READONLY)) && |
4331 | !kattr->mnt_idmap; |
4332 | } |
4333 | |
4334 | static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt) |
4335 | { |
4336 | struct mount *m; |
4337 | int err; |
4338 | |
for (m = mnt; m; m = next_mnt(m, mnt)) {
if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
err = -EPERM;
break;
}

err = can_idmap_mount(kattr, m);
if (err)
break;

if (!mnt_allow_writers(kattr, m)) {
err = mnt_hold_writers(m);
4351 | if (err) |
4352 | break; |
4353 | } |
4354 | |
4355 | if (!kattr->recurse) |
4356 | return 0; |
4357 | } |
4358 | |
4359 | if (err) { |
4360 | struct mount *p; |
4361 | |
4362 | /* |
4363 | * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will |
4364 | * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all |
4365 | * mounts and needs to take care to include the first mount. |
4366 | */ |
for (p = mnt; p; p = next_mnt(p, mnt)) {
/* If we had to hold writers unblock them. */
if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
mnt_unhold_writers(p);
4371 | |
4372 | /* |
4373 | * We're done once the first mount we changed got |
4374 | * MNT_WRITE_HOLD unset. |
4375 | */ |
4376 | if (p == m) |
4377 | break; |
4378 | } |
4379 | } |
4380 | return err; |
4381 | } |
4382 | |
4383 | static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt) |
4384 | { |
4385 | if (!kattr->mnt_idmap) |
4386 | return; |
4387 | |
4388 | /* |
4389 | * Pairs with smp_load_acquire() in mnt_idmap(). |
4390 | * |
4391 | * Since we only allow a mount to change the idmapping once and |
4392 | * verified this in can_idmap_mount() we know that the mount has |
4393 | * @nop_mnt_idmap attached to it. So there's no need to drop any |
4394 | * references. |
4395 | */ |
4396 | smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap)); |
4397 | } |
4398 | |
4399 | static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt) |
4400 | { |
4401 | struct mount *m; |
4402 | |
for (m = mnt; m; m = next_mnt(m, mnt)) {
unsigned int flags;

do_idmap_mount(kattr, m);
flags = recalc_flags(kattr, m);
WRITE_ONCE(m->mnt.mnt_flags, flags);

/* If we had to hold writers unblock them. */
if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
mnt_unhold_writers(m);
4413 | |
4414 | if (kattr->propagation) |
4415 | change_mnt_propagation(m, kattr->propagation); |
4416 | if (!kattr->recurse) |
4417 | break; |
4418 | } |
touch_mnt_namespace(mnt->mnt_ns);
4420 | } |
4421 | |
4422 | static int do_mount_setattr(struct path *path, struct mount_kattr *kattr) |
4423 | { |
struct mount *mnt = real_mount(path->mnt);
4425 | int err = 0; |
4426 | |
4427 | if (!path_mounted(path)) |
4428 | return -EINVAL; |
4429 | |
4430 | if (kattr->mnt_userns) { |
4431 | struct mnt_idmap *mnt_idmap; |
4432 | |
mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
if (IS_ERR(mnt_idmap))
return PTR_ERR(mnt_idmap);
4436 | kattr->mnt_idmap = mnt_idmap; |
4437 | } |
4438 | |
4439 | if (kattr->propagation) { |
4440 | /* |
4441 | * Only take namespace_lock() if we're actually changing |
4442 | * propagation. |
4443 | */ |
4444 | namespace_lock(); |
4445 | if (kattr->propagation == MS_SHARED) { |
err = invent_group_ids(mnt, kattr->recurse);
4447 | if (err) { |
4448 | namespace_unlock(); |
4449 | return err; |
4450 | } |
4451 | } |
4452 | } |
4453 | |
4454 | err = -EINVAL; |
4455 | lock_mount_hash(); |
4456 | |
4457 | /* Ensure that this isn't anything purely vfs internal. */ |
if (!is_mounted(&mnt->mnt))
4459 | goto out; |
4460 | |
4461 | /* |
* If this is an attached mount make sure it's located in the caller's
* mount namespace. If it's not, don't let the caller interact with it.
4464 | * If this is a detached mount make sure it has an anonymous mount |
4465 | * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE. |
4466 | */ |
if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns)))
4468 | goto out; |
4469 | |
4470 | /* |
4471 | * First, we get the mount tree in a shape where we can change mount |
4472 | * properties without failure. If we succeeded to do so we commit all |
4473 | * changes and if we failed we clean up. |
4474 | */ |
4475 | err = mount_setattr_prepare(kattr, mnt); |
4476 | if (!err) |
4477 | mount_setattr_commit(kattr, mnt); |
4478 | |
4479 | out: |
4480 | unlock_mount_hash(); |
4481 | |
4482 | if (kattr->propagation) { |
4483 | if (err) |
4484 | cleanup_group_ids(mnt, NULL); |
4485 | namespace_unlock(); |
4486 | } |
4487 | |
4488 | return err; |
4489 | } |
4490 | |
4491 | static int build_mount_idmapped(const struct mount_attr *attr, size_t usize, |
4492 | struct mount_kattr *kattr, unsigned int flags) |
4493 | { |
4494 | int err = 0; |
4495 | struct ns_common *ns; |
4496 | struct user_namespace *mnt_userns; |
4497 | struct fd f; |
4498 | |
4499 | if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP)) |
4500 | return 0; |
4501 | |
4502 | /* |
4503 | * We currently do not support clearing an idmapped mount. If this ever |
4504 | * is a use-case we can revisit this but for now let's keep it simple |
4505 | * and not allow it. |
4506 | */ |
4507 | if (attr->attr_clr & MOUNT_ATTR_IDMAP) |
4508 | return -EINVAL; |
4509 | |
4510 | if (attr->userns_fd > INT_MAX) |
4511 | return -EINVAL; |
4512 | |
f = fdget(attr->userns_fd);
4514 | if (!f.file) |
4515 | return -EBADF; |
4516 | |
if (!proc_ns_file(f.file)) {
4518 | err = -EINVAL; |
4519 | goto out_fput; |
4520 | } |
4521 | |
4522 | ns = get_proc_ns(file_inode(f.file)); |
4523 | if (ns->ops->type != CLONE_NEWUSER) { |
4524 | err = -EINVAL; |
4525 | goto out_fput; |
4526 | } |
4527 | |
4528 | /* |
4529 | * The initial idmapping cannot be used to create an idmapped |
4530 | * mount. We use the initial idmapping as an indicator of a mount |
4531 | * that is not idmapped. It can simply be passed into helpers that |
4532 | * are aware of idmapped mounts as a convenient shortcut. A user |
4533 | * can just create a dedicated identity mapping to achieve the same |
4534 | * result. |
4535 | */ |
4536 | mnt_userns = container_of(ns, struct user_namespace, ns); |
4537 | if (mnt_userns == &init_user_ns) { |
4538 | err = -EPERM; |
4539 | goto out_fput; |
4540 | } |
4541 | |
4542 | /* We're not controlling the target namespace. */ |
if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
4544 | err = -EPERM; |
4545 | goto out_fput; |
4546 | } |
4547 | |
kattr->mnt_userns = get_user_ns(mnt_userns);
4549 | |
4550 | out_fput: |
fdput(f);
4552 | return err; |
4553 | } |
4554 | |
4555 | static int build_mount_kattr(const struct mount_attr *attr, size_t usize, |
4556 | struct mount_kattr *kattr, unsigned int flags) |
4557 | { |
4558 | unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; |
4559 | |
4560 | if (flags & AT_NO_AUTOMOUNT) |
4561 | lookup_flags &= ~LOOKUP_AUTOMOUNT; |
4562 | if (flags & AT_SYMLINK_NOFOLLOW) |
4563 | lookup_flags &= ~LOOKUP_FOLLOW; |
4564 | if (flags & AT_EMPTY_PATH) |
4565 | lookup_flags |= LOOKUP_EMPTY; |
4566 | |
4567 | *kattr = (struct mount_kattr) { |
4568 | .lookup_flags = lookup_flags, |
4569 | .recurse = !!(flags & AT_RECURSIVE), |
4570 | }; |
4571 | |
4572 | if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS) |
4573 | return -EINVAL; |
4574 | if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1) |
4575 | return -EINVAL; |
4576 | kattr->propagation = attr->propagation; |
4577 | |
4578 | if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS) |
4579 | return -EINVAL; |
4580 | |
kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
4583 | |
4584 | /* |
4585 | * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap, |
4586 | * users wanting to transition to a different atime setting cannot |
4587 | * simply specify the atime setting in @attr_set, but must also |
4588 | * specify MOUNT_ATTR__ATIME in the @attr_clr field. |
4589 | * So ensure that MOUNT_ATTR__ATIME can't be partially set in |
4590 | * @attr_clr and that @attr_set can't have any atime bits set if |
4591 | * MOUNT_ATTR__ATIME isn't set in @attr_clr. |
4592 | */ |
4593 | if (attr->attr_clr & MOUNT_ATTR__ATIME) { |
4594 | if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME) |
4595 | return -EINVAL; |
4596 | |
4597 | /* |
4598 | * Clear all previous time settings as they are mutually |
4599 | * exclusive. |
4600 | */ |
4601 | kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME; |
4602 | switch (attr->attr_set & MOUNT_ATTR__ATIME) { |
4603 | case MOUNT_ATTR_RELATIME: |
4604 | kattr->attr_set |= MNT_RELATIME; |
4605 | break; |
4606 | case MOUNT_ATTR_NOATIME: |
4607 | kattr->attr_set |= MNT_NOATIME; |
4608 | break; |
4609 | case MOUNT_ATTR_STRICTATIME: |
4610 | break; |
4611 | default: |
4612 | return -EINVAL; |
4613 | } |
4614 | } else { |
4615 | if (attr->attr_set & MOUNT_ATTR__ATIME) |
4616 | return -EINVAL; |
4617 | } |
4618 | |
4619 | return build_mount_idmapped(attr, usize, kattr, flags); |
4620 | } |
4621 | |
4622 | static void finish_mount_kattr(struct mount_kattr *kattr) |
4623 | { |
put_user_ns(kattr->mnt_userns);
4625 | kattr->mnt_userns = NULL; |
4626 | |
4627 | if (kattr->mnt_idmap) |
mnt_idmap_put(kattr->mnt_idmap);
4629 | } |
4630 | |
4631 | SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path, |
4632 | unsigned int, flags, struct mount_attr __user *, uattr, |
4633 | size_t, usize) |
4634 | { |
4635 | int err; |
4636 | struct path target; |
4637 | struct mount_attr attr; |
4638 | struct mount_kattr kattr; |
4639 | |
4640 | BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0); |
4641 | |
4642 | if (flags & ~(AT_EMPTY_PATH | |
4643 | AT_RECURSIVE | |
4644 | AT_SYMLINK_NOFOLLOW | |
4645 | AT_NO_AUTOMOUNT)) |
4646 | return -EINVAL; |
4647 | |
4648 | if (unlikely(usize > PAGE_SIZE)) |
4649 | return -E2BIG; |
4650 | if (unlikely(usize < MOUNT_ATTR_SIZE_VER0)) |
4651 | return -EINVAL; |
4652 | |
4653 | if (!may_mount()) |
4654 | return -EPERM; |
4655 | |
err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
4657 | if (err) |
4658 | return err; |
4659 | |
4660 | /* Don't bother walking through the mounts if this is a nop. */ |
4661 | if (attr.attr_set == 0 && |
4662 | attr.attr_clr == 0 && |
4663 | attr.propagation == 0) |
4664 | return 0; |
4665 | |
err = build_mount_kattr(&attr, usize, &kattr, flags);
4667 | if (err) |
4668 | return err; |
4669 | |
err = user_path_at(dfd, path, kattr.lookup_flags, &target);
if (!err) {
err = do_mount_setattr(&target, &kattr);
path_put(&target);
}
finish_mount_kattr(&kattr);
4676 | return err; |
4677 | } |
4678 | |
4679 | static void __init init_mount_tree(void) |
4680 | { |
4681 | struct vfsmount *mnt; |
4682 | struct mount *m; |
4683 | struct mnt_namespace *ns; |
4684 | struct path root; |
4685 | |
mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
if (IS_ERR(mnt))
panic("Can't create rootfs");

ns = alloc_mnt_ns(&init_user_ns, false);
if (IS_ERR(ns))
panic("Can't allocate initial namespace");
m = real_mount(mnt);
m->mnt_ns = ns;
ns->root = m;
ns->mounts = 1;
list_add(&m->mnt_list, &ns->list);
4698 | init_task.nsproxy->mnt_ns = ns; |
4699 | get_mnt_ns(ns); |
4700 | |
4701 | root.mnt = mnt; |
4702 | root.dentry = mnt->mnt_root; |
4703 | mnt->mnt_flags |= MNT_LOCKED; |
4704 | |
4705 | set_fs_pwd(current->fs, &root); |
4706 | set_fs_root(current->fs, &root); |
4707 | } |
4708 | |
4709 | void __init mnt_init(void) |
4710 | { |
4711 | int err; |
4712 | |
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);

mount_hashtable = alloc_large_system_hash("Mount-cache",
sizeof(struct hlist_head),
mhash_entries, 19,
HASH_ZERO,
&m_hash_shift, &m_hash_mask, 0, 0);
mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
sizeof(struct hlist_head),
mphash_entries, 19,
HASH_ZERO,
&mp_hash_shift, &mp_hash_mask, 0, 0);

if (!mount_hashtable || !mountpoint_hashtable)
panic("Failed to allocate mount hash table\n");
4729 | |
4730 | kernfs_init(); |
4731 | |
4732 | err = sysfs_init(); |
4733 | if (err) |
4734 | printk(KERN_WARNING "%s: sysfs_init error: %d\n" , |
4735 | __func__, err); |
4736 | fs_kobj = kobject_create_and_add(name: "fs" , NULL); |
4737 | if (!fs_kobj) |
4738 | printk(KERN_WARNING "%s: kobj create error\n" , __func__); |
4739 | shmem_init(); |
4740 | init_rootfs(); |
4741 | init_mount_tree(); |
4742 | } |
4743 | |
4744 | void put_mnt_ns(struct mnt_namespace *ns) |
4745 | { |
if (!refcount_dec_and_test(&ns->ns.count))
return;
drop_collected_mounts(&ns->root->mnt);
4749 | free_mnt_ns(ns); |
4750 | } |
4751 | |
4752 | struct vfsmount *kern_mount(struct file_system_type *type) |
4753 | { |
4754 | struct vfsmount *mnt; |
4755 | mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL); |
if (!IS_ERR(mnt)) {
/*
* it is a longterm mount; don't release mnt until we unmount
* it, just before the filesystem is unregistered
*/
4761 | real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; |
4762 | } |
4763 | return mnt; |
4764 | } |
4765 | EXPORT_SYMBOL_GPL(kern_mount); |
4766 | |
4767 | void kern_unmount(struct vfsmount *mnt) |
4768 | { |
4769 | /* release long term mount so mount point can be released */ |
if (!IS_ERR(mnt)) {
4771 | mnt_make_shortterm(mnt); |
4772 | synchronize_rcu(); /* yecchhh... */ |
4773 | mntput(mnt); |
4774 | } |
4775 | } |
4776 | EXPORT_SYMBOL(kern_unmount); |
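
/*
* Typical pairing of the two helpers above (sketch; the filesystem
* type and the global are hypothetical):
*
*	static struct vfsmount *example_mnt;
*
*	static int __init example_init(void)
*	{
*		example_mnt = kern_mount(&example_fs_type);
*		return PTR_ERR_OR_ZERO(example_mnt);
*	}
*
*	static void __exit example_exit(void)
*	{
*		kern_unmount(example_mnt);
*	}
*
* This is the pattern used for internal superblocks (pipefs, sockfs
* and friends) that must outlive any individual file reference.
*/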
4777 | |
4778 | void kern_unmount_array(struct vfsmount *mnt[], unsigned int num) |
4779 | { |
4780 | unsigned int i; |
4781 | |
4782 | for (i = 0; i < num; i++) |
mnt_make_shortterm(mnt[i]);
4784 | synchronize_rcu_expedited(); |
4785 | for (i = 0; i < num; i++) |
4786 | mntput(mnt[i]); |
4787 | } |
4788 | EXPORT_SYMBOL(kern_unmount_array); |
4789 | |
4790 | bool our_mnt(struct vfsmount *mnt) |
4791 | { |
return check_mnt(real_mount(mnt));
4793 | } |
4794 | |
4795 | bool current_chrooted(void) |
4796 | { |
4797 | /* Does the current process have a non-standard root */ |
4798 | struct path ns_root; |
4799 | struct path fs_root; |
4800 | bool chrooted; |
4801 | |
4802 | /* Find the namespace root */ |
ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
ns_root.dentry = ns_root.mnt->mnt_root;
path_get(&ns_root);
while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
4807 | ; |
4808 | |
get_fs_root(current->fs, &fs_root);
4810 | |
chrooted = !path_equal(&fs_root, &ns_root);
4812 | |
4813 | path_put(&fs_root); |
4814 | path_put(&ns_root); |
4815 | |
4816 | return chrooted; |
4817 | } |
4818 | |
4819 | static bool mnt_already_visible(struct mnt_namespace *ns, |
4820 | const struct super_block *sb, |
4821 | int *new_mnt_flags) |
4822 | { |
4823 | int new_flags = *new_mnt_flags; |
4824 | struct mount *mnt; |
4825 | bool visible = false; |
4826 | |
down_read(&namespace_sem);
4828 | lock_ns_list(ns); |
4829 | list_for_each_entry(mnt, &ns->list, mnt_list) { |
4830 | struct mount *child; |
4831 | int mnt_flags; |
4832 | |
4833 | if (mnt_is_cursor(mnt)) |
4834 | continue; |
4835 | |
4836 | if (mnt->mnt.mnt_sb->s_type != sb->s_type) |
4837 | continue; |
4838 | |
/* This mount is not fully visible if its root directory
* is not the root directory of the filesystem.
*/
4842 | if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) |
4843 | continue; |
4844 | |
4845 | /* A local view of the mount flags */ |
4846 | mnt_flags = mnt->mnt.mnt_flags; |
4847 | |
4848 | /* Don't miss readonly hidden in the superblock flags */ |
if (sb_rdonly(mnt->mnt.mnt_sb))
4850 | mnt_flags |= MNT_LOCK_READONLY; |
4851 | |
4852 | /* Verify the mount flags are equal to or more permissive |
4853 | * than the proposed new mount. |
4854 | */ |
4855 | if ((mnt_flags & MNT_LOCK_READONLY) && |
4856 | !(new_flags & MNT_READONLY)) |
4857 | continue; |
4858 | if ((mnt_flags & MNT_LOCK_ATIME) && |
4859 | ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK))) |
4860 | continue; |
4861 | |
4862 | /* This mount is not fully visible if there are any |
4863 | * locked child mounts that cover anything except for |
4864 | * empty directories. |
4865 | */ |
4866 | list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { |
4867 | struct inode *inode = child->mnt_mountpoint->d_inode; |
4868 | /* Only worry about locked mounts */ |
4869 | if (!(child->mnt.mnt_flags & MNT_LOCKED)) |
4870 | continue; |
/* Is the directory permanently empty? */
4872 | if (!is_empty_dir_inode(inode)) |
4873 | goto next; |
4874 | } |
4875 | /* Preserve the locked attributes */ |
*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY |
MNT_LOCK_ATIME);
4878 | visible = true; |
4879 | goto found; |
4880 | next: ; |
4881 | } |
4882 | found: |
4883 | unlock_ns_list(ns); |
up_read(&namespace_sem);
4885 | return visible; |
4886 | } |
4887 | |
4888 | static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags) |
4889 | { |
4890 | const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV; |
4891 | struct mnt_namespace *ns = current->nsproxy->mnt_ns; |
4892 | unsigned long s_iflags; |
4893 | |
4894 | if (ns->user_ns == &init_user_ns) |
4895 | return false; |
4896 | |
4897 | /* Can this filesystem be too revealing? */ |
4898 | s_iflags = sb->s_iflags; |
4899 | if (!(s_iflags & SB_I_USERNS_VISIBLE)) |
4900 | return false; |
4901 | |
4902 | if ((s_iflags & required_iflags) != required_iflags) { |
4903 | WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n" , |
4904 | required_iflags); |
4905 | return true; |
4906 | } |
4907 | |
4908 | return !mnt_already_visible(ns, sb, new_mnt_flags); |
4909 | } |
4910 | |
4911 | bool mnt_may_suid(struct vfsmount *mnt) |
4912 | { |
4913 | /* |
4914 | * Foreign mounts (accessed via fchdir or through /proc |
4915 | * symlinks) are always treated as if they are nosuid. This |
4916 | * prevents namespaces from trusting potentially unsafe |
4917 | * suid/sgid bits, file caps, or security labels that originate |
4918 | * in other namespaces. |
4919 | */ |
return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
current_in_userns(mnt->mnt_sb->s_user_ns);
4922 | } |
4923 | |
4924 | static struct ns_common *mntns_get(struct task_struct *task) |
4925 | { |
4926 | struct ns_common *ns = NULL; |
4927 | struct nsproxy *nsproxy; |
4928 | |
task_lock(task);
4930 | nsproxy = task->nsproxy; |
4931 | if (nsproxy) { |
4932 | ns = &nsproxy->mnt_ns->ns; |
get_mnt_ns(to_mnt_ns(ns));
4934 | } |
task_unlock(task);
4936 | |
4937 | return ns; |
4938 | } |
4939 | |
4940 | static void mntns_put(struct ns_common *ns) |
4941 | { |
put_mnt_ns(to_mnt_ns(ns));
4943 | } |
4944 | |
4945 | static int mntns_install(struct nsset *nsset, struct ns_common *ns) |
4946 | { |
4947 | struct nsproxy *nsproxy = nsset->nsproxy; |
4948 | struct fs_struct *fs = nsset->fs; |
4949 | struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns; |
4950 | struct user_namespace *user_ns = nsset->cred->user_ns; |
4951 | struct path root; |
4952 | int err; |
4953 | |
if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(user_ns, CAP_SYS_CHROOT) ||
!ns_capable(user_ns, CAP_SYS_ADMIN))
4957 | return -EPERM; |
4958 | |
if (is_anon_ns(mnt_ns))
4960 | return -EINVAL; |
4961 | |
4962 | if (fs->users != 1) |
4963 | return -EINVAL; |
4964 | |
get_mnt_ns(mnt_ns);
4966 | old_mnt_ns = nsproxy->mnt_ns; |
4967 | nsproxy->mnt_ns = mnt_ns; |
4968 | |
4969 | /* Find the root */ |
4970 | err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt, |
4971 | "/" , LOOKUP_DOWN, &root); |
4972 | if (err) { |
4973 | /* revert to old namespace */ |
4974 | nsproxy->mnt_ns = old_mnt_ns; |
put_mnt_ns(mnt_ns);
4976 | return err; |
4977 | } |
4978 | |
put_mnt_ns(old_mnt_ns);
4980 | |
4981 | /* Update the pwd and root */ |
4982 | set_fs_pwd(fs, &root); |
4983 | set_fs_root(fs, &root); |
4984 | |
4985 | path_put(&root); |
4986 | return 0; |
4987 | } |
4988 | |
4989 | static struct user_namespace *mntns_owner(struct ns_common *ns) |
4990 | { |
4991 | return to_mnt_ns(ns)->user_ns; |
4992 | } |
4993 | |
4994 | const struct proc_ns_operations mntns_operations = { |
4995 | .name = "mnt" , |
4996 | .type = CLONE_NEWNS, |
4997 | .get = mntns_get, |
4998 | .put = mntns_put, |
4999 | .install = mntns_install, |
5000 | .owner = mntns_owner, |
5001 | }; |
5002 | |
5003 | #ifdef CONFIG_SYSCTL |
5004 | static struct ctl_table fs_namespace_sysctls[] = { |
5005 | { |
5006 | .procname = "mount-max" , |
5007 | .data = &sysctl_mount_max, |
5008 | .maxlen = sizeof(unsigned int), |
5009 | .mode = 0644, |
5010 | .proc_handler = proc_dointvec_minmax, |
5011 | .extra1 = SYSCTL_ONE, |
5012 | }, |
5013 | { } |
5014 | }; |
5015 | |
5016 | static int __init init_fs_namespace_sysctls(void) |
5017 | { |
5018 | register_sysctl_init("fs" , fs_namespace_sysctls); |
5019 | return 0; |
5020 | } |
5021 | fs_initcall(init_fs_namespace_sysctls); |
5022 | |
5023 | #endif /* CONFIG_SYSCTL */ |
5024 | |