1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/kernel/fork.c |
4 | * |
5 | * Copyright (C) 1991, 1992 Linus Torvalds |
6 | */ |
7 | |
8 | /* |
9 | * 'fork.c' contains the help-routines for the 'fork' system call |
10 | * (see also entry.S and others). |
11 | * Fork is rather simple, once you get the hang of it, but the memory |
12 | * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' |
13 | */ |
14 | |
15 | #include <linux/anon_inodes.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/sched/autogroup.h> |
18 | #include <linux/sched/mm.h> |
19 | #include <linux/sched/coredump.h> |
20 | #include <linux/sched/user.h> |
21 | #include <linux/sched/numa_balancing.h> |
22 | #include <linux/sched/stat.h> |
23 | #include <linux/sched/task.h> |
24 | #include <linux/sched/task_stack.h> |
25 | #include <linux/sched/cputime.h> |
26 | #include <linux/seq_file.h> |
27 | #include <linux/rtmutex.h> |
28 | #include <linux/init.h> |
29 | #include <linux/unistd.h> |
30 | #include <linux/module.h> |
31 | #include <linux/vmalloc.h> |
32 | #include <linux/completion.h> |
33 | #include <linux/personality.h> |
34 | #include <linux/mempolicy.h> |
35 | #include <linux/sem.h> |
36 | #include <linux/file.h> |
37 | #include <linux/fdtable.h> |
38 | #include <linux/iocontext.h> |
39 | #include <linux/key.h> |
40 | #include <linux/kmsan.h> |
41 | #include <linux/binfmts.h> |
42 | #include <linux/mman.h> |
43 | #include <linux/mmu_notifier.h> |
44 | #include <linux/fs.h> |
45 | #include <linux/mm.h> |
46 | #include <linux/mm_inline.h> |
47 | #include <linux/nsproxy.h> |
48 | #include <linux/capability.h> |
49 | #include <linux/cpu.h> |
50 | #include <linux/cgroup.h> |
51 | #include <linux/security.h> |
52 | #include <linux/hugetlb.h> |
53 | #include <linux/seccomp.h> |
54 | #include <linux/swap.h> |
55 | #include <linux/syscalls.h> |
56 | #include <linux/syscall_user_dispatch.h> |
57 | #include <linux/jiffies.h> |
58 | #include <linux/futex.h> |
59 | #include <linux/compat.h> |
60 | #include <linux/kthread.h> |
61 | #include <linux/task_io_accounting_ops.h> |
62 | #include <linux/rcupdate.h> |
63 | #include <linux/ptrace.h> |
64 | #include <linux/mount.h> |
65 | #include <linux/audit.h> |
66 | #include <linux/memcontrol.h> |
67 | #include <linux/ftrace.h> |
68 | #include <linux/proc_fs.h> |
69 | #include <linux/profile.h> |
70 | #include <linux/rmap.h> |
71 | #include <linux/ksm.h> |
72 | #include <linux/acct.h> |
73 | #include <linux/userfaultfd_k.h> |
74 | #include <linux/tsacct_kern.h> |
75 | #include <linux/cn_proc.h> |
76 | #include <linux/freezer.h> |
77 | #include <linux/delayacct.h> |
78 | #include <linux/taskstats_kern.h> |
79 | #include <linux/tty.h> |
80 | #include <linux/fs_struct.h> |
81 | #include <linux/magic.h> |
82 | #include <linux/perf_event.h> |
83 | #include <linux/posix-timers.h> |
84 | #include <linux/user-return-notifier.h> |
85 | #include <linux/oom.h> |
86 | #include <linux/khugepaged.h> |
87 | #include <linux/signalfd.h> |
88 | #include <linux/uprobes.h> |
89 | #include <linux/aio.h> |
90 | #include <linux/compiler.h> |
91 | #include <linux/sysctl.h> |
92 | #include <linux/kcov.h> |
93 | #include <linux/livepatch.h> |
94 | #include <linux/thread_info.h> |
95 | #include <linux/stackleak.h> |
96 | #include <linux/kasan.h> |
97 | #include <linux/scs.h> |
98 | #include <linux/io_uring.h> |
99 | #include <linux/bpf.h> |
100 | #include <linux/stackprotector.h> |
101 | #include <linux/user_events.h> |
102 | #include <linux/iommu.h> |
103 | #include <linux/rseq.h> |
104 | #include <uapi/linux/pidfd.h> |
105 | #include <linux/pidfs.h> |
106 | |
107 | #include <asm/pgalloc.h> |
108 | #include <linux/uaccess.h> |
109 | #include <asm/mmu_context.h> |
110 | #include <asm/cacheflush.h> |
111 | #include <asm/tlbflush.h> |
112 | |
113 | #include <trace/events/sched.h> |
114 | |
115 | #define CREATE_TRACE_POINTS |
116 | #include <trace/events/task.h> |
117 | |
118 | /* |
119 | * Minimum number of threads to boot the kernel |
120 | */ |
121 | #define MIN_THREADS 20 |
122 | |
123 | /* |
124 | * Maximum number of threads |
125 | */ |
126 | #define MAX_THREADS FUTEX_TID_MASK |
127 | |
128 | /* |
129 | * Counters protected by write_lock_irq(&tasklist_lock) |
130 | */ |
131 | unsigned long total_forks; /* Handle normal Linux uptimes. */ |
132 | int nr_threads; /* The idle threads do not count.. */ |
133 | |
134 | static int max_threads; /* tunable limit on nr_threads */ |
135 | |
136 | #define NAMED_ARRAY_INDEX(x) [x] = __stringify(x) |
137 | |
138 | static const char * const resident_page_types[] = { |
139 | NAMED_ARRAY_INDEX(MM_FILEPAGES), |
140 | NAMED_ARRAY_INDEX(MM_ANONPAGES), |
141 | NAMED_ARRAY_INDEX(MM_SWAPENTS), |
142 | NAMED_ARRAY_INDEX(MM_SHMEMPAGES), |
143 | }; |
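/*
 * NAMED_ARRAY_INDEX(MM_FILEPAGES) expands to [MM_FILEPAGES] = "MM_FILEPAGES",
 * so resident_page_types[] maps every rss counter index to its name;
 * check_mm() below uses it to report which counter was left non-zero when
 * an mm is torn down.
 */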
144 | |
145 | DEFINE_PER_CPU(unsigned long, process_counts) = 0; |
146 | |
147 | __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ |
148 | |
149 | #ifdef CONFIG_PROVE_RCU |
150 | int lockdep_tasklist_lock_is_held(void) |
151 | { |
152 | return lockdep_is_held(&tasklist_lock); |
153 | } |
154 | EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); |
155 | #endif /* #ifdef CONFIG_PROVE_RCU */ |
156 | |
157 | int nr_processes(void) |
158 | { |
159 | int cpu; |
160 | int total = 0; |
161 | |
162 | for_each_possible_cpu(cpu) |
163 | total += per_cpu(process_counts, cpu); |
164 | |
165 | return total; |
166 | } |
167 | |
168 | void __weak arch_release_task_struct(struct task_struct *tsk) |
169 | { |
170 | } |
171 | |
172 | static struct kmem_cache *task_struct_cachep; |
173 | |
174 | static inline struct task_struct *alloc_task_struct_node(int node) |
175 | { |
176 | return kmem_cache_alloc_node(s: task_struct_cachep, GFP_KERNEL, node); |
177 | } |
178 | |
179 | static inline void free_task_struct(struct task_struct *tsk) |
180 | { |
181 | kmem_cache_free(s: task_struct_cachep, objp: tsk); |
182 | } |
183 | |
184 | /* |
185 | * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a |
186 | * kmemcache based allocator. |
187 | */ |
188 | # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) |
189 | |
190 | # ifdef CONFIG_VMAP_STACK |
191 | /* |
192 | * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB |
193 | * flush. Try to minimize the number of calls by caching stacks. |
194 | */ |
195 | #define NR_CACHED_STACKS 2 |
196 | static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]); |
197 | |
198 | struct vm_stack { |
199 | struct rcu_head rcu; |
200 | struct vm_struct *stack_vm_area; |
201 | }; |
202 | |
203 | static bool try_release_thread_stack_to_cache(struct vm_struct *vm) |
204 | { |
205 | unsigned int i; |
206 | |
207 | for (i = 0; i < NR_CACHED_STACKS; i++) { |
208 | if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL) |
209 | continue; |
210 | return true; |
211 | } |
212 | return false; |
213 | } |
214 | |
215 | static void thread_stack_free_rcu(struct rcu_head *rh) |
216 | { |
217 | struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu); |
218 | |
219 | if (try_release_thread_stack_to_cache(vm: vm_stack->stack_vm_area)) |
220 | return; |
221 | |
222 | vfree(addr: vm_stack); |
223 | } |
224 | |
225 | static void thread_stack_delayed_free(struct task_struct *tsk) |
226 | { |
227 | struct vm_stack *vm_stack = tsk->stack; |
228 | |
229 | vm_stack->stack_vm_area = tsk->stack_vm_area; |
230 | call_rcu(head: &vm_stack->rcu, func: thread_stack_free_rcu); |
231 | } |
232 | |
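/*
 * CPU-hotplug teardown callback, registered from fork_init() when
 * CONFIG_VMAP_STACK is enabled: drop any stacks still parked in the
 * per-CPU cache of a CPU that is going offline.
 */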
233 | static int free_vm_stack_cache(unsigned int cpu) |
234 | { |
235 | struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu); |
236 | int i; |
237 | |
238 | for (i = 0; i < NR_CACHED_STACKS; i++) { |
239 | struct vm_struct *vm_stack = cached_vm_stacks[i]; |
240 | |
241 | if (!vm_stack) |
242 | continue; |
243 | |
244 | vfree(addr: vm_stack->addr); |
245 | cached_vm_stacks[i] = NULL; |
246 | } |
247 | |
248 | return 0; |
249 | } |
250 | |
251 | static int memcg_charge_kernel_stack(struct vm_struct *vm) |
252 | { |
253 | int i; |
254 | int ret; |
255 | int nr_charged = 0; |
256 | |
257 | BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE); |
258 | |
259 | for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { |
260 | ret = memcg_kmem_charge_page(page: vm->pages[i], GFP_KERNEL, order: 0); |
261 | if (ret) |
262 | goto err; |
263 | nr_charged++; |
264 | } |
265 | return 0; |
266 | err: |
267 | for (i = 0; i < nr_charged; i++) |
268 | memcg_kmem_uncharge_page(page: vm->pages[i], order: 0); |
269 | return ret; |
270 | } |
271 | |
272 | static int alloc_thread_stack_node(struct task_struct *tsk, int node) |
273 | { |
274 | struct vm_struct *vm; |
275 | void *stack; |
276 | int i; |
277 | |
278 | for (i = 0; i < NR_CACHED_STACKS; i++) { |
279 | struct vm_struct *s; |
280 | |
281 | s = this_cpu_xchg(cached_stacks[i], NULL); |
282 | |
283 | if (!s) |
284 | continue; |
285 | |
286 | /* Reset stack metadata. */ |
287 | kasan_unpoison_range(address: s->addr, THREAD_SIZE); |
288 | |
289 | stack = kasan_reset_tag(addr: s->addr); |
290 | |
291 | /* Clear stale pointers from reused stack. */ |
292 | memset(stack, 0, THREAD_SIZE); |
293 | |
294 | if (memcg_charge_kernel_stack(vm: s)) { |
295 | vfree(addr: s->addr); |
296 | return -ENOMEM; |
297 | } |
298 | |
299 | tsk->stack_vm_area = s; |
300 | tsk->stack = stack; |
301 | return 0; |
302 | } |
303 | |
304 | /* |
305 | * Allocated stacks are cached and later reused by new threads, |
306 | * so memcg accounting is performed manually on assigning/releasing |
307 | * stacks to tasks. Drop __GFP_ACCOUNT. |
308 | */ |
309 | stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, |
310 | VMALLOC_START, VMALLOC_END, |
311 | THREADINFO_GFP & ~__GFP_ACCOUNT, |
312 | PAGE_KERNEL, |
313 | vm_flags: 0, node, caller: __builtin_return_address(0)); |
314 | if (!stack) |
315 | return -ENOMEM; |
316 | |
317 | vm = find_vm_area(addr: stack); |
318 | if (memcg_charge_kernel_stack(vm)) { |
319 | vfree(addr: stack); |
320 | return -ENOMEM; |
321 | } |
322 | /* |
323 | * We can't call find_vm_area() in interrupt context, and |
324 | * free_thread_stack() can be called in interrupt context, |
325 | * so cache the vm_struct. |
326 | */ |
327 | tsk->stack_vm_area = vm; |
328 | stack = kasan_reset_tag(addr: stack); |
329 | tsk->stack = stack; |
330 | return 0; |
331 | } |
332 | |
333 | static void free_thread_stack(struct task_struct *tsk) |
334 | { |
335 | if (!try_release_thread_stack_to_cache(vm: tsk->stack_vm_area)) |
336 | thread_stack_delayed_free(tsk); |
337 | |
338 | tsk->stack = NULL; |
339 | tsk->stack_vm_area = NULL; |
340 | } |
341 | |
342 | # else /* !CONFIG_VMAP_STACK */ |
343 | |
344 | static void thread_stack_free_rcu(struct rcu_head *rh) |
345 | { |
346 | __free_pages(virt_to_page(rh), THREAD_SIZE_ORDER); |
347 | } |
348 | |
349 | static void thread_stack_delayed_free(struct task_struct *tsk) |
350 | { |
351 | struct rcu_head *rh = tsk->stack; |
352 | |
353 | call_rcu(rh, thread_stack_free_rcu); |
354 | } |
355 | |
356 | static int alloc_thread_stack_node(struct task_struct *tsk, int node) |
357 | { |
358 | struct page *page = alloc_pages_node(node, THREADINFO_GFP, |
359 | THREAD_SIZE_ORDER); |
360 | |
361 | if (likely(page)) { |
362 | tsk->stack = kasan_reset_tag(page_address(page)); |
363 | return 0; |
364 | } |
365 | return -ENOMEM; |
366 | } |
367 | |
368 | static void free_thread_stack(struct task_struct *tsk) |
369 | { |
370 | thread_stack_delayed_free(tsk); |
371 | tsk->stack = NULL; |
372 | } |
373 | |
374 | # endif /* CONFIG_VMAP_STACK */ |
375 | # else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */ |
376 | |
377 | static struct kmem_cache *thread_stack_cache; |
378 | |
379 | static void thread_stack_free_rcu(struct rcu_head *rh) |
380 | { |
381 | kmem_cache_free(thread_stack_cache, rh); |
382 | } |
383 | |
384 | static void thread_stack_delayed_free(struct task_struct *tsk) |
385 | { |
386 | struct rcu_head *rh = tsk->stack; |
387 | |
388 | call_rcu(rh, thread_stack_free_rcu); |
389 | } |
390 | |
391 | static int alloc_thread_stack_node(struct task_struct *tsk, int node) |
392 | { |
393 | unsigned long *stack; |
394 | stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); |
395 | stack = kasan_reset_tag(stack); |
396 | tsk->stack = stack; |
397 | return stack ? 0 : -ENOMEM; |
398 | } |
399 | |
400 | static void free_thread_stack(struct task_struct *tsk) |
401 | { |
402 | thread_stack_delayed_free(tsk); |
403 | tsk->stack = NULL; |
404 | } |
405 | |
406 | void thread_stack_cache_init(void) |
407 | { |
408 | thread_stack_cache = kmem_cache_create_usercopy("thread_stack", |
409 | THREAD_SIZE, THREAD_SIZE, 0, 0, |
410 | THREAD_SIZE, NULL); |
411 | BUG_ON(thread_stack_cache == NULL); |
412 | } |
413 | |
414 | # endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */ |
415 | |
416 | /* SLAB cache for signal_struct structures (tsk->signal) */ |
417 | static struct kmem_cache *signal_cachep; |
418 | |
419 | /* SLAB cache for sighand_struct structures (tsk->sighand) */ |
420 | struct kmem_cache *sighand_cachep; |
421 | |
422 | /* SLAB cache for files_struct structures (tsk->files) */ |
423 | struct kmem_cache *files_cachep; |
424 | |
425 | /* SLAB cache for fs_struct structures (tsk->fs) */ |
426 | struct kmem_cache *fs_cachep; |
427 | |
428 | /* SLAB cache for vm_area_struct structures */ |
429 | static struct kmem_cache *vm_area_cachep; |
430 | |
431 | /* SLAB cache for mm_struct structures (tsk->mm) */ |
432 | static struct kmem_cache *mm_cachep; |
433 | |
434 | #ifdef CONFIG_PER_VMA_LOCK |
435 | |
436 | /* SLAB cache for vm_area_struct.lock */ |
437 | static struct kmem_cache *vma_lock_cachep; |
438 | |
439 | static bool vma_lock_alloc(struct vm_area_struct *vma) |
440 | { |
441 | vma->vm_lock = kmem_cache_alloc(cachep: vma_lock_cachep, GFP_KERNEL); |
442 | if (!vma->vm_lock) |
443 | return false; |
444 | |
445 | init_rwsem(&vma->vm_lock->lock); |
446 | vma->vm_lock_seq = -1; |
447 | |
448 | return true; |
449 | } |
450 | |
451 | static inline void vma_lock_free(struct vm_area_struct *vma) |
452 | { |
453 | kmem_cache_free(s: vma_lock_cachep, objp: vma->vm_lock); |
454 | } |
455 | |
456 | #else /* CONFIG_PER_VMA_LOCK */ |
457 | |
458 | static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; } |
459 | static inline void vma_lock_free(struct vm_area_struct *vma) {} |
460 | |
461 | #endif /* CONFIG_PER_VMA_LOCK */ |
462 | |
463 | struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) |
464 | { |
465 | struct vm_area_struct *vma; |
466 | |
467 | vma = kmem_cache_alloc(cachep: vm_area_cachep, GFP_KERNEL); |
468 | if (!vma) |
469 | return NULL; |
470 | |
471 | vma_init(vma, mm); |
472 | if (!vma_lock_alloc(vma)) { |
473 | kmem_cache_free(s: vm_area_cachep, objp: vma); |
474 | return NULL; |
475 | } |
476 | |
477 | return vma; |
478 | } |
479 | |
480 | struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) |
481 | { |
482 | struct vm_area_struct *new = kmem_cache_alloc(cachep: vm_area_cachep, GFP_KERNEL); |
483 | |
484 | if (!new) |
485 | return NULL; |
486 | |
487 | ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); |
488 | ASSERT_EXCLUSIVE_WRITER(orig->vm_file); |
489 | /* |
490 | * orig->shared.rb may be modified concurrently, but the clone |
491 | * will be reinitialized. |
492 | */ |
493 | data_race(memcpy(new, orig, sizeof(*new))); |
494 | if (!vma_lock_alloc(vma: new)) { |
495 | kmem_cache_free(s: vm_area_cachep, objp: new); |
496 | return NULL; |
497 | } |
498 | INIT_LIST_HEAD(list: &new->anon_vma_chain); |
499 | vma_numab_state_init(vma: new); |
500 | dup_anon_vma_name(orig_vma: orig, new_vma: new); |
501 | |
502 | return new; |
503 | } |
504 | |
505 | void __vm_area_free(struct vm_area_struct *vma) |
506 | { |
507 | vma_numab_state_free(vma); |
508 | free_anon_vma_name(vma); |
509 | vma_lock_free(vma); |
510 | kmem_cache_free(s: vm_area_cachep, objp: vma); |
511 | } |
512 | |
513 | #ifdef CONFIG_PER_VMA_LOCK |
514 | static void vm_area_free_rcu_cb(struct rcu_head *head) |
515 | { |
516 | struct vm_area_struct *vma = container_of(head, struct vm_area_struct, |
517 | vm_rcu); |
518 | |
519 | /* The vma should not be locked while being destroyed. */ |
520 | VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma); |
521 | __vm_area_free(vma); |
522 | } |
523 | #endif |
524 | |
525 | void vm_area_free(struct vm_area_struct *vma) |
526 | { |
527 | #ifdef CONFIG_PER_VMA_LOCK |
528 | call_rcu(head: &vma->vm_rcu, func: vm_area_free_rcu_cb); |
529 | #else |
530 | __vm_area_free(vma); |
531 | #endif |
532 | } |
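/*
 * Illustrative sketch of how the helpers above pair up for a caller that
 * creates and later destroys a mapping (not a complete mmap() path):
 *
 *	struct vm_area_struct *vma = vm_area_alloc(mm);
 *
 *	if (!vma)
 *		return -ENOMEM;
 *	...set vm_start/vm_end/vm_flags and link the vma into the mm...
 *	vm_area_free(vma);
 *
 * With CONFIG_PER_VMA_LOCK the final free is deferred by an RCU grace
 * period so that lockless page-fault handlers may still look at the vma.
 */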
533 | |
534 | static void account_kernel_stack(struct task_struct *tsk, int account) |
535 | { |
536 | if (IS_ENABLED(CONFIG_VMAP_STACK)) { |
537 | struct vm_struct *vm = task_stack_vm_area(t: tsk); |
538 | int i; |
539 | |
540 | for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) |
541 | mod_lruvec_page_state(page: vm->pages[i], idx: NR_KERNEL_STACK_KB, |
542 | val: account * (PAGE_SIZE / 1024)); |
543 | } else { |
544 | void *stack = task_stack_page(task: tsk); |
545 | |
546 | /* All stack pages are in the same node. */ |
547 | mod_lruvec_kmem_state(p: stack, idx: NR_KERNEL_STACK_KB, |
548 | val: account * (THREAD_SIZE / 1024)); |
549 | } |
550 | } |
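/*
 * For illustration, assuming a 16 KiB stack made of four 4 KiB pages: the
 * vmap path above adds PAGE_SIZE/1024 = 4 KB per page and the non-vmap
 * path adds THREAD_SIZE/1024 = 16 KB in one go, so either way a fork
 * raises NR_KERNEL_STACK_KB by 16 and the matching exit lowers it by 16.
 */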
551 | |
552 | void exit_task_stack_account(struct task_struct *tsk) |
553 | { |
554 | account_kernel_stack(tsk, account: -1); |
555 | |
556 | if (IS_ENABLED(CONFIG_VMAP_STACK)) { |
557 | struct vm_struct *vm; |
558 | int i; |
559 | |
560 | vm = task_stack_vm_area(t: tsk); |
561 | for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) |
562 | memcg_kmem_uncharge_page(page: vm->pages[i], order: 0); |
563 | } |
564 | } |
565 | |
566 | static void release_task_stack(struct task_struct *tsk) |
567 | { |
568 | if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD)) |
569 | return; /* Better to leak the stack than to free prematurely */ |
570 | |
571 | free_thread_stack(tsk); |
572 | } |
573 | |
574 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
575 | void put_task_stack(struct task_struct *tsk) |
576 | { |
577 | if (refcount_dec_and_test(r: &tsk->stack_refcount)) |
578 | release_task_stack(tsk); |
579 | } |
580 | #endif |
581 | |
582 | void free_task(struct task_struct *tsk) |
583 | { |
584 | #ifdef CONFIG_SECCOMP |
585 | WARN_ON_ONCE(tsk->seccomp.filter); |
586 | #endif |
587 | release_user_cpus_ptr(p: tsk); |
588 | scs_release(tsk); |
589 | |
590 | #ifndef CONFIG_THREAD_INFO_IN_TASK |
591 | /* |
592 | * The task is finally done with both the stack and thread_info, |
593 | * so free both. |
594 | */ |
595 | release_task_stack(tsk); |
596 | #else |
597 | /* |
598 | * If the task had a separate stack allocation, it should be gone |
599 | * by now. |
600 | */ |
601 | WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0); |
602 | #endif |
603 | rt_mutex_debug_task_free(tsk); |
604 | ftrace_graph_exit_task(t: tsk); |
605 | arch_release_task_struct(tsk); |
606 | if (tsk->flags & PF_KTHREAD) |
607 | free_kthread_struct(k: tsk); |
608 | bpf_task_storage_free(task: tsk); |
609 | free_task_struct(tsk); |
610 | } |
611 | EXPORT_SYMBOL(free_task); |
612 | |
613 | static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm) |
614 | { |
615 | struct file *exe_file; |
616 | |
617 | exe_file = get_mm_exe_file(mm: oldmm); |
618 | RCU_INIT_POINTER(mm->exe_file, exe_file); |
619 | /* |
620 | * We depend on the oldmm having properly denied write access to the |
621 | * exe_file already. |
622 | */ |
623 | if (exe_file && deny_write_access(file: exe_file)) |
624 | pr_warn_once("deny_write_access() failed in %s\n" , __func__); |
625 | } |
626 | |
627 | #ifdef CONFIG_MMU |
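/*
 * Duplicate the address-space layout of @oldmm into @mm for fork(): clone
 * the maple tree with __mt_dup(), then walk every vma, copying its mempolicy,
 * anon_vma chain and file mapping, and (unless VM_WIPEONFORK is set) its
 * page table entries via copy_page_range().
 */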
628 | static __latent_entropy int dup_mmap(struct mm_struct *mm, |
629 | struct mm_struct *oldmm) |
630 | { |
631 | struct vm_area_struct *mpnt, *tmp; |
632 | int retval; |
633 | unsigned long charge = 0; |
634 | LIST_HEAD(uf); |
635 | VMA_ITERATOR(vmi, mm, 0); |
636 | |
637 | uprobe_start_dup_mmap(); |
638 | if (mmap_write_lock_killable(mm: oldmm)) { |
639 | retval = -EINTR; |
640 | goto fail_uprobe_end; |
641 | } |
642 | flush_cache_dup_mm(mm: oldmm); |
643 | uprobe_dup_mmap(oldmm, newmm: mm); |
644 | /* |
645 | * Not linked in yet - no deadlock potential: |
646 | */ |
647 | mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING); |
648 | |
649 | /* No ordering required: file already has been exposed. */ |
650 | dup_mm_exe_file(mm, oldmm); |
651 | |
652 | mm->total_vm = oldmm->total_vm; |
653 | mm->data_vm = oldmm->data_vm; |
654 | mm->exec_vm = oldmm->exec_vm; |
655 | mm->stack_vm = oldmm->stack_vm; |
656 | |
657 | retval = ksm_fork(mm, oldmm); |
658 | if (retval) |
659 | goto out; |
660 | khugepaged_fork(mm, oldmm); |
661 | |
662 | /* Use __mt_dup() to efficiently build an identical maple tree. */ |
663 | retval = __mt_dup(mt: &oldmm->mm_mt, new: &mm->mm_mt, GFP_KERNEL); |
664 | if (unlikely(retval)) |
665 | goto out; |
666 | |
667 | mt_clear_in_rcu(mt: vmi.mas.tree); |
668 | for_each_vma(vmi, mpnt) { |
669 | struct file *file; |
670 | |
671 | vma_start_write(vma: mpnt); |
672 | if (mpnt->vm_flags & VM_DONTCOPY) { |
673 | retval = vma_iter_clear_gfp(vmi: &vmi, start: mpnt->vm_start, |
674 | end: mpnt->vm_end, GFP_KERNEL); |
675 | if (retval) |
676 | goto loop_out; |
677 | |
678 | vm_stat_account(mm, mpnt->vm_flags, npages: -vma_pages(vma: mpnt)); |
679 | continue; |
680 | } |
681 | charge = 0; |
682 | /* |
683 | * Don't duplicate many vmas if we've been oom-killed (for |
684 | * example) |
685 | */ |
686 | if (fatal_signal_pending(current)) { |
687 | retval = -EINTR; |
688 | goto loop_out; |
689 | } |
690 | if (mpnt->vm_flags & VM_ACCOUNT) { |
691 | unsigned long len = vma_pages(vma: mpnt); |
692 | |
693 | if (security_vm_enough_memory_mm(mm: oldmm, pages: len)) /* sic */ |
694 | goto fail_nomem; |
695 | charge = len; |
696 | } |
697 | tmp = vm_area_dup(orig: mpnt); |
698 | if (!tmp) |
699 | goto fail_nomem; |
700 | retval = vma_dup_policy(src: mpnt, dst: tmp); |
701 | if (retval) |
702 | goto fail_nomem_policy; |
703 | tmp->vm_mm = mm; |
704 | retval = dup_userfaultfd(tmp, &uf); |
705 | if (retval) |
706 | goto fail_nomem_anon_vma_fork; |
707 | if (tmp->vm_flags & VM_WIPEONFORK) { |
708 | /* |
709 | * VM_WIPEONFORK gets a clean slate in the child. |
710 | * Don't prepare anon_vma until fault since we don't |
711 | * copy page for current vma. |
712 | */ |
713 | tmp->anon_vma = NULL; |
714 | } else if (anon_vma_fork(tmp, mpnt)) |
715 | goto fail_nomem_anon_vma_fork; |
716 | vm_flags_clear(vma: tmp, VM_LOCKED_MASK); |
717 | /* |
718 | * Copy/update hugetlb private vma information. |
719 | */ |
720 | if (is_vm_hugetlb_page(vma: tmp)) |
721 | hugetlb_dup_vma_private(vma: tmp); |
722 | |
723 | /* |
724 | * Link the vma into the MT. After using __mt_dup(), memory |
725 | * allocation is not necessary here, so it cannot fail. |
726 | */ |
727 | vma_iter_bulk_store(vmi: &vmi, vma: tmp); |
728 | |
729 | mm->map_count++; |
730 | |
731 | if (tmp->vm_ops && tmp->vm_ops->open) |
732 | tmp->vm_ops->open(tmp); |
733 | |
734 | file = tmp->vm_file; |
735 | if (file) { |
736 | struct address_space *mapping = file->f_mapping; |
737 | |
738 | get_file(f: file); |
739 | i_mmap_lock_write(mapping); |
740 | if (vma_is_shared_maywrite(vma: tmp)) |
741 | mapping_allow_writable(mapping); |
742 | flush_dcache_mmap_lock(mapping); |
743 | /* insert tmp into the share list, just after mpnt */ |
744 | vma_interval_tree_insert_after(node: tmp, prev: mpnt, |
745 | root: &mapping->i_mmap); |
746 | flush_dcache_mmap_unlock(mapping); |
747 | i_mmap_unlock_write(mapping); |
748 | } |
749 | |
750 | if (!(tmp->vm_flags & VM_WIPEONFORK)) |
751 | retval = copy_page_range(dst_vma: tmp, src_vma: mpnt); |
752 | |
753 | if (retval) { |
754 | mpnt = vma_next(vmi: &vmi); |
755 | goto loop_out; |
756 | } |
757 | } |
758 | /* a new mm has just been created */ |
759 | retval = arch_dup_mmap(oldmm, mm); |
760 | loop_out: |
761 | vma_iter_free(vmi: &vmi); |
762 | if (!retval) { |
763 | mt_set_in_rcu(mt: vmi.mas.tree); |
764 | } else if (mpnt) { |
765 | /* |
766 | * The entire maple tree has already been duplicated. If the |
767 | * mmap duplication fails, mark the failure point with |
768 | * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered, |
769 | * stop releasing VMAs that have not been duplicated after this |
770 | * point. |
771 | */ |
772 | mas_set_range(mas: &vmi.mas, start: mpnt->vm_start, last: mpnt->vm_end - 1); |
773 | mas_store(mas: &vmi.mas, XA_ZERO_ENTRY); |
774 | } |
775 | out: |
776 | mmap_write_unlock(mm); |
777 | flush_tlb_mm(oldmm); |
778 | mmap_write_unlock(mm: oldmm); |
779 | dup_userfaultfd_complete(&uf); |
780 | fail_uprobe_end: |
781 | uprobe_end_dup_mmap(); |
782 | return retval; |
783 | |
784 | fail_nomem_anon_vma_fork: |
785 | mpol_put(vma_policy(tmp)); |
786 | fail_nomem_policy: |
787 | vm_area_free(vma: tmp); |
788 | fail_nomem: |
789 | retval = -ENOMEM; |
790 | vm_unacct_memory(pages: charge); |
791 | goto loop_out; |
792 | } |
793 | |
794 | static inline int mm_alloc_pgd(struct mm_struct *mm) |
795 | { |
796 | mm->pgd = pgd_alloc(mm); |
797 | if (unlikely(!mm->pgd)) |
798 | return -ENOMEM; |
799 | return 0; |
800 | } |
801 | |
802 | static inline void mm_free_pgd(struct mm_struct *mm) |
803 | { |
804 | pgd_free(mm, pgd: mm->pgd); |
805 | } |
806 | #else |
807 | static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
808 | { |
809 | mmap_write_lock(oldmm); |
810 | dup_mm_exe_file(mm, oldmm); |
811 | mmap_write_unlock(oldmm); |
812 | return 0; |
813 | } |
814 | #define mm_alloc_pgd(mm) (0) |
815 | #define mm_free_pgd(mm) |
816 | #endif /* CONFIG_MMU */ |
817 | |
818 | static void check_mm(struct mm_struct *mm) |
819 | { |
820 | int i; |
821 | |
822 | BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS, |
823 | "Please make sure 'struct resident_page_types[]' is updated as well" ); |
824 | |
825 | for (i = 0; i < NR_MM_COUNTERS; i++) { |
826 | long x = percpu_counter_sum(fbc: &mm->rss_stat[i]); |
827 | |
828 | if (unlikely(x)) |
829 | pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n" , |
830 | mm, resident_page_types[i], x); |
831 | } |
832 | |
833 | if (mm_pgtables_bytes(mm)) |
834 | pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n" , |
835 | mm_pgtables_bytes(mm)); |
836 | |
837 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS |
838 | VM_BUG_ON_MM(mm->pmd_huge_pte, mm); |
839 | #endif |
840 | } |
841 | |
842 | #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) |
843 | #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) |
844 | |
845 | static void do_check_lazy_tlb(void *arg) |
846 | { |
847 | struct mm_struct *mm = arg; |
848 | |
849 | WARN_ON_ONCE(current->active_mm == mm); |
850 | } |
851 | |
852 | static void do_shoot_lazy_tlb(void *arg) |
853 | { |
854 | struct mm_struct *mm = arg; |
855 | |
856 | if (current->active_mm == mm) { |
857 | WARN_ON_ONCE(current->mm); |
858 | current->active_mm = &init_mm; |
859 | switch_mm(prev: mm, next: &init_mm, current); |
860 | } |
861 | } |
862 | |
863 | static void cleanup_lazy_tlbs(struct mm_struct *mm) |
864 | { |
865 | if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) { |
866 | /* |
867 | * In this case, lazy tlb mms are refcounted and will not reach |
868 | * __mmdrop until all CPUs have switched away and mmdrop()ed. |
869 | */ |
870 | return; |
871 | } |
872 | |
873 | /* |
874 | * Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it |
875 | * requires lazy mm users to switch to another mm when the refcount |
876 | * drops to zero, before the mm is freed. This requires IPIs here to |
877 | * switch kernel threads to init_mm. |
878 | * |
879 | * archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm |
880 | * switch with the final userspace teardown TLB flush which leaves the |
881 | * mm lazy on this CPU but no others, reducing the need for additional |
882 | * IPIs here. There are cases where a final IPI is still required here, |
883 | * such as the final mmdrop being performed on a different CPU than the |
884 | * one exiting, or kernel threads using the mm when userspace exits. |
885 | * |
886 | * IPI overheads have not been found to be expensive, but they could be |
887 | * reduced in a number of possible ways, for example (roughly |
888 | * increasing order of complexity): |
889 | * - The last lazy reference created by exit_mm() could instead switch |
890 | * to init_mm, however it's probable this will run on the same CPU |
891 | * immediately afterwards, so this may not reduce IPIs much. |
892 | * - A batch of mms requiring IPIs could be gathered and freed at once. |
893 | * - CPUs store active_mm where it can be remotely checked without a |
894 | * lock, to filter out false-positives in the cpumask. |
895 | * - After mm_users or mm_count reaches zero, switching away from the |
896 | * mm could clear mm_cpumask to reduce some IPIs, perhaps together |
897 | * with some batching or delaying of the final IPIs. |
898 | * - A delayed freeing and RCU-like quiescing sequence based on mm |
899 | * switching to avoid IPIs completely. |
900 | */ |
901 | on_each_cpu_mask(mask: mm_cpumask(mm), func: do_shoot_lazy_tlb, info: (void *)mm, wait: 1); |
902 | if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES)) |
903 | on_each_cpu(func: do_check_lazy_tlb, info: (void *)mm, wait: 1); |
904 | } |
905 | |
906 | /* |
907 | * Called when the last reference to the mm |
908 | * is dropped: either by a lazy thread or by |
909 | * mmput. Free the page directory and the mm. |
910 | */ |
911 | void __mmdrop(struct mm_struct *mm) |
912 | { |
913 | BUG_ON(mm == &init_mm); |
914 | WARN_ON_ONCE(mm == current->mm); |
915 | |
916 | /* Ensure no CPUs are using this as their lazy tlb mm */ |
917 | cleanup_lazy_tlbs(mm); |
918 | |
919 | WARN_ON_ONCE(mm == current->active_mm); |
920 | mm_free_pgd(mm); |
921 | destroy_context(mm); |
922 | mmu_notifier_subscriptions_destroy(mm); |
923 | check_mm(mm); |
924 | put_user_ns(ns: mm->user_ns); |
925 | mm_pasid_drop(mm); |
926 | mm_destroy_cid(mm); |
927 | percpu_counter_destroy_many(fbc: mm->rss_stat, nr_counters: NR_MM_COUNTERS); |
928 | |
929 | free_mm(mm); |
930 | } |
931 | EXPORT_SYMBOL_GPL(__mmdrop); |
932 | |
933 | static void mmdrop_async_fn(struct work_struct *work) |
934 | { |
935 | struct mm_struct *mm; |
936 | |
937 | mm = container_of(work, struct mm_struct, async_put_work); |
938 | __mmdrop(mm); |
939 | } |
940 | |
941 | static void mmdrop_async(struct mm_struct *mm) |
942 | { |
943 | if (unlikely(atomic_dec_and_test(&mm->mm_count))) { |
944 | INIT_WORK(&mm->async_put_work, mmdrop_async_fn); |
945 | schedule_work(work: &mm->async_put_work); |
946 | } |
947 | } |
948 | |
949 | static inline void free_signal_struct(struct signal_struct *sig) |
950 | { |
951 | taskstats_tgid_free(sig); |
952 | sched_autogroup_exit(sig); |
953 | /* |
954 | * __mmdrop is not safe to call from softirq context on x86 due to |
955 | * pgd_dtor, so postpone it to the async context. |
956 | */ |
957 | if (sig->oom_mm) |
958 | mmdrop_async(mm: sig->oom_mm); |
959 | kmem_cache_free(s: signal_cachep, objp: sig); |
960 | } |
961 | |
962 | static inline void put_signal_struct(struct signal_struct *sig) |
963 | { |
964 | if (refcount_dec_and_test(r: &sig->sigcnt)) |
965 | free_signal_struct(sig); |
966 | } |
967 | |
968 | void __put_task_struct(struct task_struct *tsk) |
969 | { |
970 | WARN_ON(!tsk->exit_state); |
971 | WARN_ON(refcount_read(&tsk->usage)); |
972 | WARN_ON(tsk == current); |
973 | |
974 | io_uring_free(tsk); |
975 | cgroup_free(p: tsk); |
976 | task_numa_free(p: tsk, final: true); |
977 | security_task_free(task: tsk); |
978 | exit_creds(tsk); |
979 | delayacct_tsk_free(tsk); |
980 | put_signal_struct(sig: tsk->signal); |
981 | sched_core_free(tsk); |
982 | free_task(tsk); |
983 | } |
984 | EXPORT_SYMBOL_GPL(__put_task_struct); |
985 | |
986 | void __put_task_struct_rcu_cb(struct rcu_head *rhp) |
987 | { |
988 | struct task_struct *task = container_of(rhp, struct task_struct, rcu); |
989 | |
990 | __put_task_struct(task); |
991 | } |
992 | EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb); |
993 | |
994 | void __init __weak arch_task_cache_init(void) { } |
995 | |
996 | /* |
997 | * set_max_threads - compute the global limit on the number of threads |
998 | */ |
999 | static void set_max_threads(unsigned int max_threads_suggested) |
1000 | { |
1001 | u64 threads; |
1002 | unsigned long nr_pages = totalram_pages(); |
1003 | |
1004 | /* |
1005 | * The number of threads shall be limited such that the thread |
1006 | * structures may only consume a small part of the available memory. |
1007 | */ |
1008 | if (fls64(x: nr_pages) + fls64(PAGE_SIZE) > 64) |
1009 | threads = MAX_THREADS; |
1010 | else |
1011 | threads = div64_u64(dividend: (u64) nr_pages * (u64) PAGE_SIZE, |
1012 | divisor: (u64) THREAD_SIZE * 8UL); |
1013 | |
1014 | if (threads > max_threads_suggested) |
1015 | threads = max_threads_suggested; |
1016 | |
1017 | max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); |
1018 | } |
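/*
 * Worked example of the sizing above, assuming 4 GiB of RAM, 4 KiB pages
 * and a 16 KiB THREAD_SIZE: threads = (1048576 * 4096) / (16384 * 8) =
 * 32768, i.e. fully populated thread stacks may consume at most 1/8 of
 * memory, further clamped by the caller's suggestion and
 * [MIN_THREADS, MAX_THREADS].
 */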
1019 | |
1020 | #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT |
1021 | /* Initialized by the architecture: */ |
1022 | int arch_task_struct_size __read_mostly; |
1023 | #endif |
1024 | |
1025 | static void task_struct_whitelist(unsigned long *offset, unsigned long *size) |
1026 | { |
1027 | /* Fetch thread_struct whitelist for the architecture. */ |
1028 | arch_thread_struct_whitelist(offset, size); |
1029 | |
1030 | /* |
1031 | * Handle zero-sized whitelist or empty thread_struct, otherwise |
1032 | * adjust offset to position of thread_struct in task_struct. |
1033 | */ |
1034 | if (unlikely(*size == 0)) |
1035 | *offset = 0; |
1036 | else |
1037 | *offset += offsetof(struct task_struct, thread); |
1038 | } |
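/*
 * The resulting (offset, size) pair is fed to kmem_cache_create_usercopy()
 * in fork_init() below, so that hardened usercopy allows the architecture's
 * thread state (typically FPU/register context) to be copied between user
 * space and the task_struct slab, e.g. for ptrace.
 */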
1039 | |
1040 | void __init fork_init(void) |
1041 | { |
1042 | int i; |
1043 | #ifndef ARCH_MIN_TASKALIGN |
1044 | #define ARCH_MIN_TASKALIGN 0 |
1045 | #endif |
1046 | int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); |
1047 | unsigned long useroffset, usersize; |
1048 | |
1049 | /* create a slab on which task_structs can be allocated */ |
1050 | task_struct_whitelist(offset: &useroffset, size: &usersize); |
1051 | task_struct_cachep = kmem_cache_create_usercopy(name: "task_struct", |
1052 | size: arch_task_struct_size, align, |
1053 | SLAB_PANIC|SLAB_ACCOUNT, |
1054 | useroffset, usersize, NULL); |
1055 | |
1056 | /* do the arch specific task caches init */ |
1057 | arch_task_cache_init(); |
1058 | |
1059 | set_max_threads(MAX_THREADS); |
1060 | |
1061 | init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; |
1062 | init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; |
1063 | init_task.signal->rlim[RLIMIT_SIGPENDING] = |
1064 | init_task.signal->rlim[RLIMIT_NPROC]; |
1065 | |
1066 | for (i = 0; i < UCOUNT_COUNTS; i++) |
1067 | init_user_ns.ucount_max[i] = max_threads/2; |
1068 | |
1069 | set_userns_rlimit_max(ns: &init_user_ns, type: UCOUNT_RLIMIT_NPROC, RLIM_INFINITY); |
1070 | set_userns_rlimit_max(ns: &init_user_ns, type: UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY); |
1071 | set_userns_rlimit_max(ns: &init_user_ns, type: UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY); |
1072 | set_userns_rlimit_max(ns: &init_user_ns, type: UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY); |
1073 | |
1074 | #ifdef CONFIG_VMAP_STACK |
1075 | cpuhp_setup_state(state: CPUHP_BP_PREPARE_DYN, name: "fork:vm_stack_cache" , |
1076 | NULL, teardown: free_vm_stack_cache); |
1077 | #endif |
1078 | |
1079 | scs_init(); |
1080 | |
1081 | lockdep_init_task(task: &init_task); |
1082 | uprobes_init(); |
1083 | } |
1084 | |
1085 | int __weak arch_dup_task_struct(struct task_struct *dst, |
1086 | struct task_struct *src) |
1087 | { |
1088 | *dst = *src; |
1089 | return 0; |
1090 | } |
1091 | |
1092 | void set_task_stack_end_magic(struct task_struct *tsk) |
1093 | { |
1094 | unsigned long *stackend; |
1095 | |
1096 | stackend = end_of_stack(task: tsk); |
1097 | *stackend = STACK_END_MAGIC; /* for overflow detection */ |
1098 | } |
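/*
 * The magic value is checked later, e.g. by the scheduler's
 * task_stack_end_corrupted() test, to detect a kernel stack overflow that
 * scribbled past the end of the stack.
 */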
1099 | |
1100 | static struct task_struct *dup_task_struct(struct task_struct *orig, int node) |
1101 | { |
1102 | struct task_struct *tsk; |
1103 | int err; |
1104 | |
1105 | if (node == NUMA_NO_NODE) |
1106 | node = tsk_fork_get_node(tsk: orig); |
1107 | tsk = alloc_task_struct_node(node); |
1108 | if (!tsk) |
1109 | return NULL; |
1110 | |
1111 | err = arch_dup_task_struct(dst: tsk, src: orig); |
1112 | if (err) |
1113 | goto free_tsk; |
1114 | |
1115 | err = alloc_thread_stack_node(tsk, node); |
1116 | if (err) |
1117 | goto free_tsk; |
1118 | |
1119 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
1120 | refcount_set(r: &tsk->stack_refcount, n: 1); |
1121 | #endif |
1122 | account_kernel_stack(tsk, account: 1); |
1123 | |
1124 | err = scs_prepare(tsk, node); |
1125 | if (err) |
1126 | goto free_stack; |
1127 | |
1128 | #ifdef CONFIG_SECCOMP |
1129 | /* |
1130 | * We must handle setting up seccomp filters once we're under |
1131 | * the sighand lock in case orig has changed between now and |
1132 | * then. Until then, filter must be NULL to avoid messing up |
1133 | * the usage counts on the error path calling free_task. |
1134 | */ |
1135 | tsk->seccomp.filter = NULL; |
1136 | #endif |
1137 | |
1138 | setup_thread_stack(tsk, orig); |
1139 | clear_user_return_notifier(p: tsk); |
1140 | clear_tsk_need_resched(tsk); |
1141 | set_task_stack_end_magic(tsk); |
1142 | clear_syscall_work_syscall_user_dispatch(tsk); |
1143 | |
1144 | #ifdef CONFIG_STACKPROTECTOR |
1145 | tsk->stack_canary = get_random_canary(); |
1146 | #endif |
1147 | if (orig->cpus_ptr == &orig->cpus_mask) |
1148 | tsk->cpus_ptr = &tsk->cpus_mask; |
1149 | dup_user_cpus_ptr(dst: tsk, src: orig, node); |
1150 | |
1151 | /* |
1152 | * One for the user space visible state that goes away when reaped. |
1153 | * One for the scheduler. |
1154 | */ |
1155 | refcount_set(r: &tsk->rcu_users, n: 2); |
1156 | /* One for the rcu users */ |
1157 | refcount_set(r: &tsk->usage, n: 1); |
1158 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
1159 | tsk->btrace_seq = 0; |
1160 | #endif |
1161 | tsk->splice_pipe = NULL; |
1162 | tsk->task_frag.page = NULL; |
1163 | tsk->wake_q.next = NULL; |
1164 | tsk->worker_private = NULL; |
1165 | |
1166 | kcov_task_init(t: tsk); |
1167 | kmsan_task_create(task: tsk); |
1168 | kmap_local_fork(tsk); |
1169 | |
1170 | #ifdef CONFIG_FAULT_INJECTION |
1171 | tsk->fail_nth = 0; |
1172 | #endif |
1173 | |
1174 | #ifdef CONFIG_BLK_CGROUP |
1175 | tsk->throttle_disk = NULL; |
1176 | tsk->use_memdelay = 0; |
1177 | #endif |
1178 | |
1179 | #ifdef CONFIG_ARCH_HAS_CPU_PASID |
1180 | tsk->pasid_activated = 0; |
1181 | #endif |
1182 | |
1183 | #ifdef CONFIG_MEMCG |
1184 | tsk->active_memcg = NULL; |
1185 | #endif |
1186 | |
1187 | #ifdef CONFIG_CPU_SUP_INTEL |
1188 | tsk->reported_split_lock = 0; |
1189 | #endif |
1190 | |
1191 | #ifdef CONFIG_SCHED_MM_CID |
1192 | tsk->mm_cid = -1; |
1193 | tsk->last_mm_cid = -1; |
1194 | tsk->mm_cid_active = 0; |
1195 | tsk->migrate_from_cpu = -1; |
1196 | #endif |
1197 | return tsk; |
1198 | |
1199 | free_stack: |
1200 | exit_task_stack_account(tsk); |
1201 | free_thread_stack(tsk); |
1202 | free_tsk: |
1203 | free_task_struct(tsk); |
1204 | return NULL; |
1205 | } |
1206 | |
1207 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); |
1208 | |
1209 | static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; |
1210 | |
1211 | static int __init coredump_filter_setup(char *s) |
1212 | { |
1213 | default_dump_filter = |
1214 | (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & |
1215 | MMF_DUMP_FILTER_MASK; |
1216 | return 1; |
1217 | } |
1218 | |
1219 | __setup("coredump_filter=" , coredump_filter_setup); |
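/*
 * "coredump_filter=" takes the same numeric bitmask as
 * /proc/<pid>/coredump_filter and sets the default filter inherited by new
 * mms; for example, booting with coredump_filter=0 requests that none of
 * the optional memory types be written to core dumps.
 */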
1220 | |
1221 | #include <linux/init_task.h> |
1222 | |
1223 | static void mm_init_aio(struct mm_struct *mm) |
1224 | { |
1225 | #ifdef CONFIG_AIO |
1226 | spin_lock_init(&mm->ioctx_lock); |
1227 | mm->ioctx_table = NULL; |
1228 | #endif |
1229 | } |
1230 | |
1231 | static __always_inline void mm_clear_owner(struct mm_struct *mm, |
1232 | struct task_struct *p) |
1233 | { |
1234 | #ifdef CONFIG_MEMCG |
1235 | if (mm->owner == p) |
1236 | WRITE_ONCE(mm->owner, NULL); |
1237 | #endif |
1238 | } |
1239 | |
1240 | static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) |
1241 | { |
1242 | #ifdef CONFIG_MEMCG |
1243 | mm->owner = p; |
1244 | #endif |
1245 | } |
1246 | |
1247 | static void mm_init_uprobes_state(struct mm_struct *mm) |
1248 | { |
1249 | #ifdef CONFIG_UPROBES |
1250 | mm->uprobes_state.xol_area = NULL; |
1251 | #endif |
1252 | } |
1253 | |
1254 | static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, |
1255 | struct user_namespace *user_ns) |
1256 | { |
1257 | mt_init_flags(mt: &mm->mm_mt, MM_MT_FLAGS); |
1258 | mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); |
1259 | atomic_set(v: &mm->mm_users, i: 1); |
1260 | atomic_set(v: &mm->mm_count, i: 1); |
1261 | seqcount_init(&mm->write_protect_seq); |
1262 | mmap_init_lock(mm); |
1263 | INIT_LIST_HEAD(list: &mm->mmlist); |
1264 | #ifdef CONFIG_PER_VMA_LOCK |
1265 | mm->mm_lock_seq = 0; |
1266 | #endif |
1267 | mm_pgtables_bytes_init(mm); |
1268 | mm->map_count = 0; |
1269 | mm->locked_vm = 0; |
1270 | atomic64_set(v: &mm->pinned_vm, i: 0); |
1271 | memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); |
1272 | spin_lock_init(&mm->page_table_lock); |
1273 | spin_lock_init(&mm->arg_lock); |
1274 | mm_init_cpumask(mm); |
1275 | mm_init_aio(mm); |
1276 | mm_init_owner(mm, p); |
1277 | mm_pasid_init(mm); |
1278 | RCU_INIT_POINTER(mm->exe_file, NULL); |
1279 | mmu_notifier_subscriptions_init(mm); |
1280 | init_tlb_flush_pending(mm); |
1281 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS |
1282 | mm->pmd_huge_pte = NULL; |
1283 | #endif |
1284 | mm_init_uprobes_state(mm); |
1285 | hugetlb_count_init(mm); |
1286 | |
1287 | if (current->mm) { |
1288 | mm->flags = mmf_init_flags(current->mm->flags); |
1289 | mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; |
1290 | } else { |
1291 | mm->flags = default_dump_filter; |
1292 | mm->def_flags = 0; |
1293 | } |
1294 | |
1295 | if (mm_alloc_pgd(mm)) |
1296 | goto fail_nopgd; |
1297 | |
1298 | if (init_new_context(tsk: p, mm)) |
1299 | goto fail_nocontext; |
1300 | |
1301 | if (mm_alloc_cid(mm)) |
1302 | goto fail_cid; |
1303 | |
1304 | if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT, |
1305 | NR_MM_COUNTERS)) |
1306 | goto fail_pcpu; |
1307 | |
1308 | mm->user_ns = get_user_ns(ns: user_ns); |
1309 | lru_gen_init_mm(mm); |
1310 | return mm; |
1311 | |
1312 | fail_pcpu: |
1313 | mm_destroy_cid(mm); |
1314 | fail_cid: |
1315 | destroy_context(mm); |
1316 | fail_nocontext: |
1317 | mm_free_pgd(mm); |
1318 | fail_nopgd: |
1319 | free_mm(mm); |
1320 | return NULL; |
1321 | } |
1322 | |
1323 | /* |
1324 | * Allocate and initialize an mm_struct. |
1325 | */ |
1326 | struct mm_struct *mm_alloc(void) |
1327 | { |
1328 | struct mm_struct *mm; |
1329 | |
1330 | mm = allocate_mm(); |
1331 | if (!mm) |
1332 | return NULL; |
1333 | |
1334 | memset(mm, 0, sizeof(*mm)); |
1335 | return mm_init(mm, current, current_user_ns()); |
1336 | } |
1337 | |
1338 | static inline void __mmput(struct mm_struct *mm) |
1339 | { |
1340 | VM_BUG_ON(atomic_read(&mm->mm_users)); |
1341 | |
1342 | uprobe_clear_state(mm); |
1343 | exit_aio(mm); |
1344 | ksm_exit(mm); |
1345 | khugepaged_exit(mm); /* must run before exit_mmap */ |
1346 | exit_mmap(mm); |
1347 | mm_put_huge_zero_page(mm); |
1348 | set_mm_exe_file(mm, NULL); |
1349 | if (!list_empty(head: &mm->mmlist)) { |
1350 | spin_lock(lock: &mmlist_lock); |
1351 | list_del(entry: &mm->mmlist); |
1352 | spin_unlock(lock: &mmlist_lock); |
1353 | } |
1354 | if (mm->binfmt) |
1355 | module_put(module: mm->binfmt->module); |
1356 | lru_gen_del_mm(mm); |
1357 | mmdrop(mm); |
1358 | } |
1359 | |
1360 | /* |
1361 | * Decrement the use count and release all resources for an mm. |
1362 | */ |
1363 | void mmput(struct mm_struct *mm) |
1364 | { |
1365 | might_sleep(); |
1366 | |
1367 | if (atomic_dec_and_test(v: &mm->mm_users)) |
1368 | __mmput(mm); |
1369 | } |
1370 | EXPORT_SYMBOL_GPL(mmput); |
1371 | |
1372 | #ifdef CONFIG_MMU |
1373 | static void mmput_async_fn(struct work_struct *work) |
1374 | { |
1375 | struct mm_struct *mm = container_of(work, struct mm_struct, |
1376 | async_put_work); |
1377 | |
1378 | __mmput(mm); |
1379 | } |
1380 | |
1381 | void mmput_async(struct mm_struct *mm) |
1382 | { |
1383 | if (atomic_dec_and_test(v: &mm->mm_users)) { |
1384 | INIT_WORK(&mm->async_put_work, mmput_async_fn); |
1385 | schedule_work(work: &mm->async_put_work); |
1386 | } |
1387 | } |
1388 | EXPORT_SYMBOL_GPL(mmput_async); |
1389 | #endif |
1390 | |
1391 | /** |
1392 | * set_mm_exe_file - change a reference to the mm's executable file |
1393 | * @mm: The mm to change. |
1394 | * @new_exe_file: The new file to use. |
1395 | * |
1396 | * This changes mm's executable file (shown as symlink /proc/[pid]/exe). |
1397 | * |
1398 | * Main users are mmput() and sys_execve(). Callers prevent concurrent |
1399 | * invocations: in mmput() nobody alive is left; in execve it happens before |
1400 | * the new mm is made visible to anyone. |
1401 | * |
1402 | * Can only fail if new_exe_file != NULL. |
1403 | */ |
1404 | int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) |
1405 | { |
1406 | struct file *old_exe_file; |
1407 | |
1408 | /* |
1409 | * It is safe to dereference the exe_file without RCU as |
1410 | * this function is only called if nobody else can access |
1411 | * this mm -- see comment above for justification. |
1412 | */ |
1413 | old_exe_file = rcu_dereference_raw(mm->exe_file); |
1414 | |
1415 | if (new_exe_file) { |
1416 | /* |
1417 | * We expect the caller (i.e., sys_execve) to have already denied |
1418 | * write access, so this is unlikely to fail. |
1419 | */ |
1420 | if (unlikely(deny_write_access(new_exe_file))) |
1421 | return -EACCES; |
1422 | get_file(f: new_exe_file); |
1423 | } |
1424 | rcu_assign_pointer(mm->exe_file, new_exe_file); |
1425 | if (old_exe_file) { |
1426 | allow_write_access(file: old_exe_file); |
1427 | fput(old_exe_file); |
1428 | } |
1429 | return 0; |
1430 | } |
1431 | |
1432 | /** |
1433 | * replace_mm_exe_file - replace a reference to the mm's executable file |
1434 | * @mm: The mm to change. |
1435 | * @new_exe_file: The new file to use. |
1436 | * |
1437 | * This changes mm's executable file (shown as symlink /proc/[pid]/exe). |
1438 | * |
1439 | * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE). |
1440 | */ |
1441 | int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) |
1442 | { |
1443 | struct vm_area_struct *vma; |
1444 | struct file *old_exe_file; |
1445 | int ret = 0; |
1446 | |
1447 | /* Forbid mm->exe_file change if old file still mapped. */ |
1448 | old_exe_file = get_mm_exe_file(mm); |
1449 | if (old_exe_file) { |
1450 | VMA_ITERATOR(vmi, mm, 0); |
1451 | mmap_read_lock(mm); |
1452 | for_each_vma(vmi, vma) { |
1453 | if (!vma->vm_file) |
1454 | continue; |
1455 | if (path_equal(path1: &vma->vm_file->f_path, |
1456 | path2: &old_exe_file->f_path)) { |
1457 | ret = -EBUSY; |
1458 | break; |
1459 | } |
1460 | } |
1461 | mmap_read_unlock(mm); |
1462 | fput(old_exe_file); |
1463 | if (ret) |
1464 | return ret; |
1465 | } |
1466 | |
1467 | ret = deny_write_access(file: new_exe_file); |
1468 | if (ret) |
1469 | return -EACCES; |
1470 | get_file(f: new_exe_file); |
1471 | |
1472 | /* set the new file */ |
1473 | mmap_write_lock(mm); |
1474 | old_exe_file = rcu_dereference_raw(mm->exe_file); |
1475 | rcu_assign_pointer(mm->exe_file, new_exe_file); |
1476 | mmap_write_unlock(mm); |
1477 | |
1478 | if (old_exe_file) { |
1479 | allow_write_access(file: old_exe_file); |
1480 | fput(old_exe_file); |
1481 | } |
1482 | return 0; |
1483 | } |
1484 | |
1485 | /** |
1486 | * get_mm_exe_file - acquire a reference to the mm's executable file |
1487 | * @mm: The mm of interest. |
1488 | * |
1489 | * Returns %NULL if mm has no associated executable file. |
1490 | * User must release file via fput(). |
1491 | */ |
1492 | struct file *get_mm_exe_file(struct mm_struct *mm) |
1493 | { |
1494 | struct file *exe_file; |
1495 | |
1496 | rcu_read_lock(); |
1497 | exe_file = get_file_rcu(f: &mm->exe_file); |
1498 | rcu_read_unlock(); |
1499 | return exe_file; |
1500 | } |
1501 | |
1502 | /** |
1503 | * get_task_exe_file - acquire a reference to the task's executable file |
1504 | * @task: The task. |
1505 | * |
1506 | * Returns %NULL if task's mm (if any) has no associated executable file or |
1507 | * this is a kernel thread with borrowed mm (see the comment above get_task_mm). |
1508 | * User must release file via fput(). |
1509 | */ |
1510 | struct file *get_task_exe_file(struct task_struct *task) |
1511 | { |
1512 | struct file *exe_file = NULL; |
1513 | struct mm_struct *mm; |
1514 | |
1515 | task_lock(p: task); |
1516 | mm = task->mm; |
1517 | if (mm) { |
1518 | if (!(task->flags & PF_KTHREAD)) |
1519 | exe_file = get_mm_exe_file(mm); |
1520 | } |
1521 | task_unlock(p: task); |
1522 | return exe_file; |
1523 | } |
1524 | |
1525 | /** |
1526 | * get_task_mm - acquire a reference to the task's mm |
1527 | * @task: The task. |
1528 | * |
1529 | * Returns %NULL if the task has no mm or if PF_KTHREAD is set (meaning |
1530 | * this kernel workthread has transiently adopted a user mm with use_mm, |
1531 | * e.g. to do its AIO). Otherwise returns a reference to the mm after |
1532 | * bumping up the use count. User must release the mm via mmput() |
1533 | * after use. Typically used by /proc and ptrace. |
1534 | */ |
1535 | struct mm_struct *get_task_mm(struct task_struct *task) |
1536 | { |
1537 | struct mm_struct *mm; |
1538 | |
1539 | task_lock(p: task); |
1540 | mm = task->mm; |
1541 | if (mm) { |
1542 | if (task->flags & PF_KTHREAD) |
1543 | mm = NULL; |
1544 | else |
1545 | mmget(mm); |
1546 | } |
1547 | task_unlock(p: task); |
1548 | return mm; |
1549 | } |
1550 | EXPORT_SYMBOL_GPL(get_task_mm); |
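/*
 * Typical caller pattern for the above (illustrative sketch):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		mmap_read_lock(mm);
 *		...inspect the vmas...
 *		mmap_read_unlock(mm);
 *		mmput(mm);
 *	}
 */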
1551 | |
1552 | struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) |
1553 | { |
1554 | struct mm_struct *mm; |
1555 | int err; |
1556 | |
1557 | err = down_read_killable(sem: &task->signal->exec_update_lock); |
1558 | if (err) |
1559 | return ERR_PTR(error: err); |
1560 | |
1561 | mm = get_task_mm(task); |
1562 | if (mm && mm != current->mm && |
1563 | !ptrace_may_access(task, mode)) { |
1564 | mmput(mm); |
1565 | mm = ERR_PTR(error: -EACCES); |
1566 | } |
1567 | up_read(sem: &task->signal->exec_update_lock); |
1568 | |
1569 | return mm; |
1570 | } |
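/*
 * Note the three possible results of mm_access(): the target's mm with its
 * user count raised, NULL when the task has no mm (or is a kernel thread),
 * or an ERR_PTR() such as -EINTR/-EACCES. Callers must check both IS_ERR()
 * and NULL, and mmput() a successfully returned mm when done.
 */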
1571 | |
1572 | static void complete_vfork_done(struct task_struct *tsk) |
1573 | { |
1574 | struct completion *vfork; |
1575 | |
1576 | task_lock(p: tsk); |
1577 | vfork = tsk->vfork_done; |
1578 | if (likely(vfork)) { |
1579 | tsk->vfork_done = NULL; |
1580 | complete(vfork); |
1581 | } |
1582 | task_unlock(p: tsk); |
1583 | } |
1584 | |
1585 | static int wait_for_vfork_done(struct task_struct *child, |
1586 | struct completion *vfork) |
1587 | { |
1588 | unsigned int state = TASK_KILLABLE|TASK_FREEZABLE; |
1589 | int killed; |
1590 | |
1591 | cgroup_enter_frozen(); |
1592 | killed = wait_for_completion_state(x: vfork, state); |
1593 | cgroup_leave_frozen(always_leave: false); |
1594 | |
1595 | if (killed) { |
1596 | task_lock(p: child); |
1597 | child->vfork_done = NULL; |
1598 | task_unlock(p: child); |
1599 | } |
1600 | |
1601 | put_task_struct(t: child); |
1602 | return killed; |
1603 | } |
1604 | |
1605 | /* Please note the differences between mmput and mm_release. |
1606 | * mmput is called whenever we stop holding onto a mm_struct, |
1607 | * error success whatever. |
1608 | * |
1609 | * mm_release is called after a mm_struct has been removed |
1610 | * from the current process. |
1611 | * |
1612 | * This difference is important for error handling, when we |
1613 | * only half set up a mm_struct for a new process and need to restore |
1614 | * the old one. Because we mmput the new mm_struct before |
1615 | * restoring the old one. . . |
1616 | * Eric Biederman 10 January 1998 |
1617 | */ |
1618 | static void mm_release(struct task_struct *tsk, struct mm_struct *mm) |
1619 | { |
1620 | uprobe_free_utask(t: tsk); |
1621 | |
1622 | /* Get rid of any cached register state */ |
1623 | deactivate_mm(tsk, mm); |
1624 | |
1625 | /* |
1626 | * Signal userspace if we're not exiting with a core dump |
1627 | * because we want to leave the value intact for debugging |
1628 | * purposes. |
1629 | */ |
1630 | if (tsk->clear_child_tid) { |
1631 | if (atomic_read(v: &mm->mm_users) > 1) { |
1632 | /* |
1633 | * We don't check the error code - if userspace has |
1634 | * not set up a proper pointer then tough luck. |
1635 | */ |
1636 | put_user(0, tsk->clear_child_tid); |
1637 | do_futex(uaddr: tsk->clear_child_tid, FUTEX_WAKE, |
1638 | val: 1, NULL, NULL, val2: 0, val3: 0); |
1639 | } |
1640 | tsk->clear_child_tid = NULL; |
1641 | } |
1642 | |
1643 | /* |
1644 | * All done, finally we can wake up parent and return this mm to him. |
1645 | * Also kthread_stop() uses this completion for synchronization. |
1646 | */ |
1647 | if (tsk->vfork_done) |
1648 | complete_vfork_done(tsk); |
1649 | } |
1650 | |
1651 | void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) |
1652 | { |
1653 | futex_exit_release(tsk); |
1654 | mm_release(tsk, mm); |
1655 | } |
1656 | |
1657 | void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) |
1658 | { |
1659 | futex_exec_release(tsk); |
1660 | mm_release(tsk, mm); |
1661 | } |
1662 | |
1663 | /** |
1664 | * dup_mm() - duplicates an existing mm structure |
1665 | * @tsk: the task_struct with which the new mm will be associated. |
1666 | * @oldmm: the mm to duplicate. |
1667 | * |
1668 | * Allocates a new mm structure and duplicates the provided @oldmm structure |
1669 | * content into it. |
1670 | * |
1671 | * Return: the duplicated mm or NULL on failure. |
1672 | */ |
1673 | static struct mm_struct *dup_mm(struct task_struct *tsk, |
1674 | struct mm_struct *oldmm) |
1675 | { |
1676 | struct mm_struct *mm; |
1677 | int err; |
1678 | |
1679 | mm = allocate_mm(); |
1680 | if (!mm) |
1681 | goto fail_nomem; |
1682 | |
1683 | memcpy(mm, oldmm, sizeof(*mm)); |
1684 | |
1685 | if (!mm_init(mm, p: tsk, user_ns: mm->user_ns)) |
1686 | goto fail_nomem; |
1687 | |
1688 | err = dup_mmap(mm, oldmm); |
1689 | if (err) |
1690 | goto free_pt; |
1691 | |
1692 | mm->hiwater_rss = get_mm_rss(mm); |
1693 | mm->hiwater_vm = mm->total_vm; |
1694 | |
1695 | if (mm->binfmt && !try_module_get(module: mm->binfmt->module)) |
1696 | goto free_pt; |
1697 | |
1698 | return mm; |
1699 | |
1700 | free_pt: |
1701 | /* don't put binfmt in mmput, we haven't got module yet */ |
1702 | mm->binfmt = NULL; |
1703 | mm_init_owner(mm, NULL); |
1704 | mmput(mm); |
1705 | |
1706 | fail_nomem: |
1707 | return NULL; |
1708 | } |
1709 | |
1710 | static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) |
1711 | { |
1712 | struct mm_struct *mm, *oldmm; |
1713 | |
1714 | tsk->min_flt = tsk->maj_flt = 0; |
1715 | tsk->nvcsw = tsk->nivcsw = 0; |
1716 | #ifdef CONFIG_DETECT_HUNG_TASK |
1717 | tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; |
1718 | tsk->last_switch_time = 0; |
1719 | #endif |
1720 | |
1721 | tsk->mm = NULL; |
1722 | tsk->active_mm = NULL; |
1723 | |
1724 | /* |
1725 | * Are we cloning a kernel thread? |
1726 | * |
1727 | * We need to steal an active VM for that. |
1728 | */ |
1729 | oldmm = current->mm; |
1730 | if (!oldmm) |
1731 | return 0; |
1732 | |
1733 | if (clone_flags & CLONE_VM) { |
1734 | mmget(mm: oldmm); |
1735 | mm = oldmm; |
1736 | } else { |
1737 | mm = dup_mm(tsk, current->mm); |
1738 | if (!mm) |
1739 | return -ENOMEM; |
1740 | } |
1741 | |
1742 | tsk->mm = mm; |
1743 | tsk->active_mm = mm; |
1744 | sched_mm_cid_fork(t: tsk); |
1745 | return 0; |
1746 | } |
1747 | |
1748 | static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) |
1749 | { |
1750 | struct fs_struct *fs = current->fs; |
1751 | if (clone_flags & CLONE_FS) { |
1752 | /* tsk->fs is already what we want */ |
1753 | spin_lock(lock: &fs->lock); |
1754 | /* "users" and "in_exec" locked for check_unsafe_exec() */ |
1755 | if (fs->in_exec) { |
1756 | spin_unlock(lock: &fs->lock); |
1757 | return -EAGAIN; |
1758 | } |
1759 | fs->users++; |
1760 | spin_unlock(lock: &fs->lock); |
1761 | return 0; |
1762 | } |
1763 | tsk->fs = copy_fs_struct(fs); |
1764 | if (!tsk->fs) |
1765 | return -ENOMEM; |
1766 | return 0; |
1767 | } |
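/*
 * With CLONE_FS the parent and child share a single fs_struct (root, cwd
 * and umask); the in_exec test above refuses to share with a process that
 * is in the middle of execve(), mirroring check_unsafe_exec().
 */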
1768 | |
1769 | static int copy_files(unsigned long clone_flags, struct task_struct *tsk, |
1770 | int no_files) |
1771 | { |
1772 | struct files_struct *oldf, *newf; |
1773 | int error = 0; |
1774 | |
1775 | /* |
1776 | * A background process may not have any files ... |
1777 | */ |
1778 | oldf = current->files; |
1779 | if (!oldf) |
1780 | goto out; |
1781 | |
1782 | if (no_files) { |
1783 | tsk->files = NULL; |
1784 | goto out; |
1785 | } |
1786 | |
1787 | if (clone_flags & CLONE_FILES) { |
		atomic_inc(&oldf->count);
1789 | goto out; |
1790 | } |
1791 | |
1792 | newf = dup_fd(oldf, NR_OPEN_MAX, &error); |
1793 | if (!newf) |
1794 | goto out; |
1795 | |
1796 | tsk->files = newf; |
1797 | error = 0; |
1798 | out: |
1799 | return error; |
1800 | } |
1801 | |
1802 | static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) |
1803 | { |
1804 | struct sighand_struct *sig; |
1805 | |
1806 | if (clone_flags & CLONE_SIGHAND) { |
		refcount_inc(&current->sighand->count);
1808 | return 0; |
1809 | } |
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1811 | RCU_INIT_POINTER(tsk->sighand, sig); |
1812 | if (!sig) |
1813 | return -ENOMEM; |
1814 | |
	refcount_set(&sig->count, 1);
	spin_lock_irq(&current->sighand->siglock);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	spin_unlock_irq(&current->sighand->siglock);
1819 | |
	/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
	if (clone_flags & CLONE_CLEAR_SIGHAND)
		flush_signal_handlers(tsk, 0);
1823 | |
1824 | return 0; |
1825 | } |
1826 | |
1827 | void __cleanup_sighand(struct sighand_struct *sighand) |
1828 | { |
	if (refcount_dec_and_test(&sighand->count)) {
1830 | signalfd_cleanup(sighand); |
1831 | /* |
1832 | * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it |
1833 | * without an RCU grace period, see __lock_task_sighand(). |
1834 | */ |
		kmem_cache_free(sighand_cachep, sighand);
1836 | } |
1837 | } |
1838 | |
1839 | /* |
1840 | * Initialize POSIX timer handling for a thread group. |
1841 | */ |
1842 | static void posix_cpu_timers_init_group(struct signal_struct *sig) |
1843 | { |
1844 | struct posix_cputimers *pct = &sig->posix_cputimers; |
1845 | unsigned long cpu_limit; |
1846 | |
1847 | cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); |
1848 | posix_cputimers_group_init(pct, cpu_limit); |
1849 | } |
1850 | |
1851 | static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) |
1852 | { |
1853 | struct signal_struct *sig; |
1854 | |
1855 | if (clone_flags & CLONE_THREAD) |
1856 | return 0; |
1857 | |
	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1859 | tsk->signal = sig; |
1860 | if (!sig) |
1861 | return -ENOMEM; |
1862 | |
1863 | sig->nr_threads = 1; |
1864 | sig->quick_threads = 1; |
	atomic_set(&sig->live, 1);
	refcount_set(&sig->sigcnt, 1);
1867 | |
1868 | /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ |
1869 | sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); |
1870 | tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); |
1871 | |
1872 | init_waitqueue_head(&sig->wait_chldexit); |
1873 | sig->curr_target = tsk; |
	init_sigpending(&sig->shared_pending);
1875 | INIT_HLIST_HEAD(&sig->multiprocess); |
1876 | seqlock_init(&sig->stats_lock); |
	prev_cputime_init(&sig->prev_cputime);
1878 | |
1879 | #ifdef CONFIG_POSIX_TIMERS |
	INIT_LIST_HEAD(&sig->posix_timers);
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1882 | sig->real_timer.function = it_real_fn; |
1883 | #endif |
1884 | |
1885 | task_lock(current->group_leader); |
1886 | memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); |
1887 | task_unlock(current->group_leader); |
1888 | |
1889 | posix_cpu_timers_init_group(sig); |
1890 | |
1891 | tty_audit_fork(sig); |
1892 | sched_autogroup_fork(sig); |
1893 | |
1894 | sig->oom_score_adj = current->signal->oom_score_adj; |
1895 | sig->oom_score_adj_min = current->signal->oom_score_adj_min; |
1896 | |
1897 | mutex_init(&sig->cred_guard_mutex); |
1898 | init_rwsem(&sig->exec_update_lock); |
1899 | |
1900 | return 0; |
1901 | } |
1902 | |
1903 | static void copy_seccomp(struct task_struct *p) |
1904 | { |
1905 | #ifdef CONFIG_SECCOMP |
1906 | /* |
1907 | * Must be called with sighand->lock held, which is common to |
1908 | * all threads in the group. Holding cred_guard_mutex is not |
1909 | * needed because this new task is not yet running and cannot |
1910 | * be racing exec. |
1911 | */ |
	assert_spin_locked(&current->sighand->siglock);
1913 | |
1914 | /* Ref-count the new filter user, and assign it. */ |
1915 | get_seccomp_filter(current); |
1916 | p->seccomp = current->seccomp; |
1917 | |
1918 | /* |
1919 | * Explicitly enable no_new_privs here in case it got set |
1920 | * between the task_struct being duplicated and holding the |
1921 | * sighand lock. The seccomp state and nnp must be in sync. |
1922 | */ |
1923 | if (task_no_new_privs(current)) |
1924 | task_set_no_new_privs(p); |
1925 | |
1926 | /* |
	 * If the parent gained a seccomp mode after the thread flags were
	 * copied but before we took the sighand lock, we have to manually
	 * enable the seccomp thread flag here.
1930 | */ |
1931 | if (p->seccomp.mode != SECCOMP_MODE_DISABLED) |
1932 | set_task_syscall_work(p, SECCOMP); |
1933 | #endif |
1934 | } |
1935 | |
1936 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) |
1937 | { |
1938 | current->clear_child_tid = tidptr; |
1939 | |
1940 | return task_pid_vnr(current); |
1941 | } |
1942 | |
1943 | static void rt_mutex_init_task(struct task_struct *p) |
1944 | { |
1945 | raw_spin_lock_init(&p->pi_lock); |
1946 | #ifdef CONFIG_RT_MUTEXES |
1947 | p->pi_waiters = RB_ROOT_CACHED; |
1948 | p->pi_top_task = NULL; |
1949 | p->pi_blocked_on = NULL; |
1950 | #endif |
1951 | } |
1952 | |
1953 | static inline void init_task_pid_links(struct task_struct *task) |
1954 | { |
1955 | enum pid_type type; |
1956 | |
1957 | for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) |
		INIT_HLIST_NODE(&task->pid_links[type]);
1959 | } |
1960 | |
1961 | static inline void |
1962 | init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) |
1963 | { |
1964 | if (type == PIDTYPE_PID) |
1965 | task->thread_pid = pid; |
1966 | else |
1967 | task->signal->pids[type] = pid; |
1968 | } |
1969 | |
1970 | static inline void rcu_copy_process(struct task_struct *p) |
1971 | { |
1972 | #ifdef CONFIG_PREEMPT_RCU |
1973 | p->rcu_read_lock_nesting = 0; |
1974 | p->rcu_read_unlock_special.s = 0; |
1975 | p->rcu_blocked_node = NULL; |
	INIT_LIST_HEAD(&p->rcu_node_entry);
1977 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ |
1978 | #ifdef CONFIG_TASKS_RCU |
1979 | p->rcu_tasks_holdout = false; |
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
	INIT_LIST_HEAD(&p->rcu_tasks_exit_list);
1983 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
1984 | #ifdef CONFIG_TASKS_TRACE_RCU |
1985 | p->trc_reader_nesting = 0; |
1986 | p->trc_reader_special.s = 0; |
	INIT_LIST_HEAD(&p->trc_holdout_list);
	INIT_LIST_HEAD(&p->trc_blkd_node);
1989 | #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
1990 | } |
1991 | |
1992 | /** |
1993 | * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd |
1994 | * @pid: the struct pid for which to create a pidfd |
1995 | * @flags: flags of the new @pidfd |
1996 | * @ret: Where to return the file for the pidfd. |
1997 | * |
1998 | * Allocate a new file that stashes @pid and reserve a new pidfd number in the |
1999 | * caller's file descriptor table. The pidfd is reserved but not installed yet. |
2000 | * |
2001 | * The helper doesn't perform checks on @pid which makes it useful for pidfds |
2002 | * created via CLONE_PIDFD where @pid has no task attached when the pidfd and |
2003 | * pidfd file are prepared. |
2004 | * |
 * If this function returns successfully the caller is responsible for either
 * calling fd_install() with the returned pidfd and pidfd file to install the
 * pidfd into its file descriptor table, or for calling put_unused_fd() and
 * fput() on the returned pidfd and pidfd file respectively.
2010 | * |
2011 | * This function is useful when a pidfd must already be reserved but there |
2012 | * might still be points of failure afterwards and the caller wants to ensure |
2013 | * that no pidfd is leaked into its file descriptor table. |
2014 | * |
2015 | * Return: On success, a reserved pidfd is returned from the function and a new |
2016 | * pidfd file is returned in the last argument to the function. On |
2017 | * error, a negative error code is returned from the function and the |
2018 | * last argument remains unchanged. |
2019 | */ |
2020 | static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) |
2021 | { |
2022 | int pidfd; |
2023 | struct file *pidfd_file; |
2024 | |
2025 | pidfd = get_unused_fd_flags(O_CLOEXEC); |
2026 | if (pidfd < 0) |
2027 | return pidfd; |
2028 | |
	pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR);
	if (IS_ERR(pidfd_file)) {
		put_unused_fd(pidfd);
		return PTR_ERR(pidfd_file);
2033 | } |
2034 | /* |
2035 | * anon_inode_getfile() ignores everything outside of the |
2036 | * O_ACCMODE | O_NONBLOCK mask, set PIDFD_THREAD manually. |
2037 | */ |
2038 | pidfd_file->f_flags |= (flags & PIDFD_THREAD); |
2039 | *ret = pidfd_file; |
2040 | return pidfd; |
2041 | } |
2042 | |
2043 | /** |
2044 | * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd |
2045 | * @pid: the struct pid for which to create a pidfd |
2046 | * @flags: flags of the new @pidfd |
2047 | * @ret: Where to return the pidfd. |
2048 | * |
2049 | * Allocate a new file that stashes @pid and reserve a new pidfd number in the |
2050 | * caller's file descriptor table. The pidfd is reserved but not installed yet. |
2051 | * |
2052 | * The helper verifies that @pid is still in use, without PIDFD_THREAD the |
2053 | * task identified by @pid must be a thread-group leader. |
2054 | * |
 * If this function returns successfully the caller is responsible for either
 * calling fd_install() with the returned pidfd and pidfd file to install the
 * pidfd into its file descriptor table, or for calling put_unused_fd() and
 * fput() on the returned pidfd and pidfd file respectively.
2060 | * |
2061 | * This function is useful when a pidfd must already be reserved but there |
2062 | * might still be points of failure afterwards and the caller wants to ensure |
2063 | * that no pidfd is leaked into its file descriptor table. |
2064 | * |
2065 | * Return: On success, a reserved pidfd is returned from the function and a new |
2066 | * pidfd file is returned in the last argument to the function. On |
2067 | * error, a negative error code is returned from the function and the |
2068 | * last argument remains unchanged. |
2069 | */ |
2070 | int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) |
2071 | { |
2072 | bool thread = flags & PIDFD_THREAD; |
2073 | |
	if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID))
2075 | return -EINVAL; |
2076 | |
2077 | return __pidfd_prepare(pid, flags, ret); |
2078 | } |
2079 | |
2080 | static void __delayed_free_task(struct rcu_head *rhp) |
2081 | { |
2082 | struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); |
2083 | |
2084 | free_task(tsk); |
2085 | } |
2086 | |
2087 | static __always_inline void delayed_free_task(struct task_struct *tsk) |
2088 | { |
2089 | if (IS_ENABLED(CONFIG_MEMCG)) |
		call_rcu(&tsk->rcu, __delayed_free_task);
2091 | else |
2092 | free_task(tsk); |
2093 | } |
2094 | |
2095 | static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) |
2096 | { |
2097 | /* Skip if kernel thread */ |
2098 | if (!tsk->mm) |
2099 | return; |
2100 | |
2101 | /* Skip if spawning a thread or using vfork */ |
2102 | if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) |
2103 | return; |
2104 | |
2105 | /* We need to synchronize with __set_oom_adj */ |
2106 | mutex_lock(&oom_adj_mutex); |
	set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
2108 | /* Update the values in case they were changed after copy_signal */ |
2109 | tsk->signal->oom_score_adj = current->signal->oom_score_adj; |
2110 | tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; |
	mutex_unlock(&oom_adj_mutex);
2112 | } |
2113 | |
2114 | #ifdef CONFIG_RV |
2115 | static void rv_task_fork(struct task_struct *p) |
2116 | { |
2117 | int i; |
2118 | |
2119 | for (i = 0; i < RV_PER_TASK_MONITORS; i++) |
2120 | p->rv[i].da_mon.monitoring = false; |
2121 | } |
2122 | #else |
2123 | #define rv_task_fork(p) do {} while (0) |
2124 | #endif |
2125 | |
2126 | /* |
2127 | * This creates a new process as a copy of the old one, |
2128 | * but does not actually start it yet. |
2129 | * |
2130 | * It copies the registers, and all the appropriate |
2131 | * parts of the process environment (as per the clone |
2132 | * flags). The actual kick-off is left to the caller. |
2133 | */ |
2134 | __latent_entropy struct task_struct *copy_process( |
2135 | struct pid *pid, |
2136 | int trace, |
2137 | int node, |
2138 | struct kernel_clone_args *args) |
2139 | { |
2140 | int pidfd = -1, retval; |
2141 | struct task_struct *p; |
2142 | struct multiprocess_signals delayed; |
2143 | struct file *pidfile = NULL; |
2144 | const u64 clone_flags = args->flags; |
2145 | struct nsproxy *nsp = current->nsproxy; |
2146 | |
2147 | /* |
2148 | * Don't allow sharing the root directory with processes in a different |
2149 | * namespace |
2150 | */ |
2151 | if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) |
		return ERR_PTR(-EINVAL);
2153 | |
2154 | if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) |
		return ERR_PTR(-EINVAL);
2156 | |
2157 | /* |
2158 | * Thread groups must share signals as well, and detached threads |
2159 | * can only be started up within the thread group. |
2160 | */ |
2161 | if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) |
		return ERR_PTR(-EINVAL);
2163 | |
2164 | /* |
2165 | * Shared signal handlers imply shared VM. By way of the above, |
2166 | * thread groups also imply shared VM. Blocking this case allows |
2167 | * for various simplifications in other code. |
2168 | */ |
2169 | if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) |
		return ERR_PTR(-EINVAL);
2171 | |
2172 | /* |
2173 | * Siblings of global init remain as zombies on exit since they are |
2174 | * not reaped by their parent (swapper). To solve this and to avoid |
2175 | * multi-rooted process trees, prevent global and container-inits |
2176 | * from creating siblings. |
2177 | */ |
2178 | if ((clone_flags & CLONE_PARENT) && |
2179 | current->signal->flags & SIGNAL_UNKILLABLE) |
		return ERR_PTR(-EINVAL);
2181 | |
2182 | /* |
2183 | * If the new process will be in a different pid or user namespace |
2184 | * do not allow it to share a thread group with the forking task. |
2185 | */ |
2186 | if (clone_flags & CLONE_THREAD) { |
2187 | if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || |
2188 | (task_active_pid_ns(current) != nsp->pid_ns_for_children)) |
			return ERR_PTR(-EINVAL);
2190 | } |
2191 | |
2192 | if (clone_flags & CLONE_PIDFD) { |
2193 | /* |
2194 | * - CLONE_DETACHED is blocked so that we can potentially |
2195 | * reuse it later for CLONE_PIDFD. |
2196 | */ |
2197 | if (clone_flags & CLONE_DETACHED) |
			return ERR_PTR(-EINVAL);
2199 | } |
2200 | |
2201 | /* |
2202 | * Force any signals received before this point to be delivered |
2203 | * before the fork happens. Collect up signals sent to multiple |
2204 | * processes that happen during the fork and delay them so that |
2205 | * they appear to happen after the fork. |
2206 | */ |
	sigemptyset(&delayed.signal);
	INIT_HLIST_NODE(&delayed.node);

	spin_lock_irq(&current->sighand->siglock);
	if (!(clone_flags & CLONE_THREAD))
		hlist_add_head(&delayed.node, &current->signal->multiprocess);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
2215 | retval = -ERESTARTNOINTR; |
2216 | if (task_sigpending(current)) |
2217 | goto fork_out; |
2218 | |
2219 | retval = -ENOMEM; |
2220 | p = dup_task_struct(current, node); |
2221 | if (!p) |
2222 | goto fork_out; |
2223 | p->flags &= ~PF_KTHREAD; |
2224 | if (args->kthread) |
2225 | p->flags |= PF_KTHREAD; |
2226 | if (args->user_worker) { |
2227 | /* |
2228 | * Mark us a user worker, and block any signal that isn't |
2229 | * fatal or STOP |
2230 | */ |
2231 | p->flags |= PF_USER_WORKER; |
		siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
2233 | } |
2234 | if (args->io_thread) |
2235 | p->flags |= PF_IO_WORKER; |
2236 | |
2237 | if (args->name) |
2238 | strscpy_pad(p->comm, args->name, sizeof(p->comm)); |
2239 | |
2240 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL; |
2241 | /* |
2242 | * Clear TID on mm_release()? |
2243 | */ |
2244 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL; |
2245 | |
	ftrace_graph_init_task(p);
2247 | |
2248 | rt_mutex_init_task(p); |
2249 | |
2250 | lockdep_assert_irqs_enabled(); |
2251 | #ifdef CONFIG_PROVE_LOCKING |
2252 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); |
2253 | #endif |
2254 | retval = copy_creds(p, clone_flags); |
2255 | if (retval < 0) |
2256 | goto bad_fork_free; |
2257 | |
2258 | retval = -EAGAIN; |
	if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
2260 | if (p->real_cred->user != INIT_USER && |
2261 | !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) |
2262 | goto bad_fork_cleanup_count; |
2263 | } |
2264 | current->flags &= ~PF_NPROC_EXCEEDED; |
2265 | |
2266 | /* |
2267 | * If multiple threads are within copy_process(), then this check |
2268 | * triggers too late. This doesn't hurt, the check is only there |
2269 | * to stop root fork bombs. |
2270 | */ |
2271 | retval = -EAGAIN; |
2272 | if (data_race(nr_threads >= max_threads)) |
2273 | goto bad_fork_cleanup_count; |
2274 | |
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
2276 | p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY); |
2277 | p->flags |= PF_FORKNOEXEC; |
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
2280 | rcu_copy_process(p); |
2281 | p->vfork_done = NULL; |
2282 | spin_lock_init(&p->alloc_lock); |
2283 | |
	init_sigpending(&p->pending);
2285 | |
2286 | p->utime = p->stime = p->gtime = 0; |
2287 | #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME |
2288 | p->utimescaled = p->stimescaled = 0; |
2289 | #endif |
	prev_cputime_init(&p->prev_cputime);
2291 | |
2292 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN |
2293 | seqcount_init(&p->vtime.seqcount); |
2294 | p->vtime.starttime = 0; |
2295 | p->vtime.state = VTIME_INACTIVE; |
2296 | #endif |
2297 | |
2298 | #ifdef CONFIG_IO_URING |
2299 | p->io_uring = NULL; |
2300 | #endif |
2301 | |
2302 | p->default_timer_slack_ns = current->timer_slack_ns; |
2303 | |
2304 | #ifdef CONFIG_PSI |
2305 | p->psi_flags = 0; |
2306 | #endif |
2307 | |
	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);
2310 | |
	posix_cputimers_init(&p->posix_cputimers);
2312 | |
2313 | p->io_context = NULL; |
	audit_set_context(p, NULL);
2315 | cgroup_fork(p); |
2316 | if (args->kthread) { |
2317 | if (!set_kthread_struct(p)) |
2318 | goto bad_fork_cleanup_delayacct; |
2319 | } |
2320 | #ifdef CONFIG_NUMA |
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
2324 | p->mempolicy = NULL; |
2325 | goto bad_fork_cleanup_delayacct; |
2326 | } |
2327 | #endif |
2328 | #ifdef CONFIG_CPUSETS |
2329 | p->cpuset_mem_spread_rotor = NUMA_NO_NODE; |
2330 | p->cpuset_slab_spread_rotor = NUMA_NO_NODE; |
2331 | seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock); |
2332 | #endif |
2333 | #ifdef CONFIG_TRACE_IRQFLAGS |
2334 | memset(&p->irqtrace, 0, sizeof(p->irqtrace)); |
2335 | p->irqtrace.hardirq_disable_ip = _THIS_IP_; |
2336 | p->irqtrace.softirq_enable_ip = _THIS_IP_; |
2337 | p->softirqs_enabled = 1; |
2338 | p->softirq_context = 0; |
2339 | #endif |
2340 | |
2341 | p->pagefault_disabled = 0; |
2342 | |
2343 | #ifdef CONFIG_LOCKDEP |
	lockdep_init_task(p);
2345 | #endif |
2346 | |
2347 | #ifdef CONFIG_DEBUG_MUTEXES |
2348 | p->blocked_on = NULL; /* not blocked yet */ |
2349 | #endif |
2350 | #ifdef CONFIG_BCACHE |
2351 | p->sequential_io = 0; |
2352 | p->sequential_io_avg = 0; |
2353 | #endif |
2354 | #ifdef CONFIG_BPF_SYSCALL |
2355 | RCU_INIT_POINTER(p->bpf_storage, NULL); |
2356 | p->bpf_ctx = NULL; |
2357 | #endif |
2358 | |
2359 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
2360 | retval = sched_fork(clone_flags, p); |
2361 | if (retval) |
2362 | goto bad_fork_cleanup_policy; |
2363 | |
	retval = perf_event_init_task(p, clone_flags);
2365 | if (retval) |
2366 | goto bad_fork_cleanup_policy; |
	retval = audit_alloc(p);
2368 | if (retval) |
2369 | goto bad_fork_cleanup_perf; |
2370 | /* copy all the process information */ |
2371 | shm_init_task(p); |
	retval = security_task_alloc(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_security;
	retval = copy_files(clone_flags, p, args->no_files);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
2397 | if (retval) |
2398 | goto bad_fork_cleanup_namespaces; |
2399 | retval = copy_thread(p, args); |
2400 | if (retval) |
2401 | goto bad_fork_cleanup_io; |
2402 | |
	stackleak_task_init(p);
2404 | |
2405 | if (pid != &init_struct_pid) { |
		pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
				args->set_tid_size);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
2410 | goto bad_fork_cleanup_thread; |
2411 | } |
2412 | } |
2413 | |
2414 | /* |
2415 | * This has to happen after we've potentially unshared the file |
2416 | * descriptor table (so that the pidfd doesn't leak into the child |
2417 | * if the fd table isn't shared). |
2418 | */ |
2419 | if (clone_flags & CLONE_PIDFD) { |
2420 | int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0; |
2421 | |
2422 | /* Note that no task has been attached to @pid yet. */ |
		retval = __pidfd_prepare(pid, flags, &pidfile);
2424 | if (retval < 0) |
2425 | goto bad_fork_free_pid; |
2426 | pidfd = retval; |
2427 | |
2428 | retval = put_user(pidfd, args->pidfd); |
2429 | if (retval) |
2430 | goto bad_fork_put_pidfd; |
2431 | } |
2432 | |
2433 | #ifdef CONFIG_BLOCK |
2434 | p->plug = NULL; |
2435 | #endif |
	futex_init_task(p);
2437 | |
2438 | /* |
2439 | * sigaltstack should be cleared when sharing the same VM |
2440 | */ |
2441 | if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) |
2442 | sas_ss_reset(p); |
2443 | |
2444 | /* |
2445 | * Syscall tracing and stepping should be turned off in the |
2446 | * child regardless of CLONE_PTRACE. |
2447 | */ |
2448 | user_disable_single_step(p); |
2449 | clear_task_syscall_work(p, SYSCALL_TRACE); |
2450 | #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU) |
2451 | clear_task_syscall_work(p, SYSCALL_EMU); |
2452 | #endif |
2453 | clear_tsk_latency_tracing(p); |
2454 | |
2455 | /* ok, now we should be set up.. */ |
2456 | p->pid = pid_nr(pid); |
2457 | if (clone_flags & CLONE_THREAD) { |
2458 | p->group_leader = current->group_leader; |
2459 | p->tgid = current->tgid; |
2460 | } else { |
2461 | p->group_leader = p; |
2462 | p->tgid = p->pid; |
2463 | } |
2464 | |
2465 | p->nr_dirtied = 0; |
2466 | p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); |
2467 | p->dirty_paused_when = 0; |
2468 | |
2469 | p->pdeath_signal = 0; |
2470 | p->task_works = NULL; |
2471 | clear_posix_cputimers_work(p); |
2472 | |
2473 | #ifdef CONFIG_KRETPROBES |
2474 | p->kretprobe_instances.first = NULL; |
2475 | #endif |
2476 | #ifdef CONFIG_RETHOOK |
2477 | p->rethooks.first = NULL; |
2478 | #endif |
2479 | |
2480 | /* |
2481 | * Ensure that the cgroup subsystem policies allow the new process to be |
2482 | * forked. It should be noted that the new process's css_set can be changed |
2483 | * between here and cgroup_post_fork() if an organisation operation is in |
2484 | * progress. |
2485 | */ |
	retval = cgroup_can_fork(p, args);
2487 | if (retval) |
2488 | goto bad_fork_put_pidfd; |
2489 | |
2490 | /* |
2491 | * Now that the cgroups are pinned, re-clone the parent cgroup and put |
2492 | * the new task on the correct runqueue. All this *before* the task |
2493 | * becomes visible. |
2494 | * |
2495 | * This isn't part of ->can_fork() because while the re-cloning is |
2496 | * cgroup specific, it unconditionally needs to place the task on a |
2497 | * runqueue. |
2498 | */ |
	sched_cgroup_fork(p, args);
2500 | |
2501 | /* |
2502 | * From this point on we must avoid any synchronous user-space |
2503 | * communication until we take the tasklist-lock. In particular, we do |
2504 | * not want user-space to be able to predict the process start-time by |
2505 | * stalling fork(2) after we recorded the start_time but before it is |
2506 | * visible to the system. |
2507 | */ |
2508 | |
2509 | p->start_time = ktime_get_ns(); |
2510 | p->start_boottime = ktime_get_boottime_ns(); |
2511 | |
2512 | /* |
	 * Make it visible to the rest of the system, but don't wake it up yet.
2514 | * Need tasklist lock for parent etc handling! |
2515 | */ |
2516 | write_lock_irq(&tasklist_lock); |
2517 | |
2518 | /* CLONE_PARENT re-uses the old parent */ |
2519 | if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { |
2520 | p->real_parent = current->real_parent; |
2521 | p->parent_exec_id = current->parent_exec_id; |
2522 | if (clone_flags & CLONE_THREAD) |
2523 | p->exit_signal = -1; |
2524 | else |
2525 | p->exit_signal = current->group_leader->exit_signal; |
2526 | } else { |
2527 | p->real_parent = current; |
2528 | p->parent_exec_id = current->self_exec_id; |
2529 | p->exit_signal = args->exit_signal; |
2530 | } |
2531 | |
	klp_copy_process(p);
2533 | |
2534 | sched_core_fork(p); |
2535 | |
	spin_lock(&current->sighand->siglock);
2537 | |
2538 | rv_task_fork(p); |
2539 | |
	rseq_fork(p, clone_flags);
2541 | |
2542 | /* Don't start children in a dying pid namespace */ |
2543 | if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { |
2544 | retval = -ENOMEM; |
2545 | goto bad_fork_cancel_cgroup; |
2546 | } |
2547 | |
2548 | /* Let kill terminate clone/fork in the middle */ |
2549 | if (fatal_signal_pending(current)) { |
2550 | retval = -EINTR; |
2551 | goto bad_fork_cancel_cgroup; |
2552 | } |
2553 | |
2554 | /* No more failure paths after this point. */ |
2555 | |
2556 | /* |
2557 | * Copy seccomp details explicitly here, in case they were changed |
2558 | * before holding sighand lock. |
2559 | */ |
2560 | copy_seccomp(p); |
2561 | |
	init_task_pid_links(p);
2563 | if (likely(p->pid)) { |
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_TGID, pid);
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));
2571 | |
2572 | if (is_child_reaper(pid)) { |
2573 | ns_of_pid(pid)->child_reaper = p; |
2574 | p->signal->flags |= SIGNAL_UNKILLABLE; |
2575 | } |
2576 | p->signal->shared_pending.signal = delayed.signal; |
2577 | p->signal->tty = tty_kref_get(current->signal->tty); |
2578 | /* |
2579 | * Inherit has_child_subreaper flag under the same |
2580 | * tasklist_lock with adding child to the process tree |
2581 | * for propagate_has_child_subreaper optimization. |
2582 | */ |
2583 | p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || |
2584 | p->real_parent->signal->is_child_subreaper; |
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_TGID);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			current->signal->quick_threads++;
			atomic_inc(&current->signal->live);
			refcount_inc(&current->signal->sigcnt);
			task_join_group_stop(p);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
2601 | nr_threads++; |
2602 | } |
2603 | total_forks++; |
	hlist_del_init(&delayed.node);
	spin_unlock(&current->sighand->siglock);
2606 | syscall_tracepoint_update(p); |
2607 | write_unlock_irq(&tasklist_lock); |
2608 | |
2609 | if (pidfile) |
		fd_install(pidfd, pidfile);
2611 | |
	proc_fork_connector(p);
	sched_post_fork(p);
	cgroup_post_fork(p, args);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);
	user_events_fork(p, clone_flags);

	copy_oom_score_adj(clone_flags, p);
2622 | |
2623 | return p; |
2624 | |
2625 | bad_fork_cancel_cgroup: |
	sched_core_free(p);
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
	if (clone_flags & CLONE_PIDFD) {
		fput(pidfile);
		put_unused_fd(pidfd);
2634 | } |
2635 | bad_fork_free_pid: |
2636 | if (pid != &init_struct_pid) |
2637 | free_pid(pid); |
2638 | bad_fork_cleanup_thread: |
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		mm_clear_owner(p->mm, p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
#endif
bad_fork_cleanup_delayacct:
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	exit_creds(p);
bad_fork_free:
	WRITE_ONCE(p->__state, TASK_DEAD);
	exit_task_stack_account(p);
	put_task_stack(p);
	delayed_free_task(p);
fork_out:
	spin_lock_irq(&current->sighand->siglock);
	hlist_del_init(&delayed.node);
	spin_unlock_irq(&current->sighand->siglock);
	return ERR_PTR(retval);
2687 | } |
2688 | |
2689 | static inline void init_idle_pids(struct task_struct *idle) |
2690 | { |
2691 | enum pid_type type; |
2692 | |
2693 | for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { |
		INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
		init_task_pid(idle, type, &init_struct_pid);
2696 | } |
2697 | } |
2698 | |
2699 | static int idle_dummy(void *dummy) |
2700 | { |
2701 | /* This function is never called */ |
2702 | return 0; |
2703 | } |
2704 | |
2705 | struct task_struct * __init fork_idle(int cpu) |
2706 | { |
2707 | struct task_struct *task; |
2708 | struct kernel_clone_args args = { |
2709 | .flags = CLONE_VM, |
2710 | .fn = &idle_dummy, |
2711 | .fn_arg = NULL, |
2712 | .kthread = 1, |
2713 | .idle = 1, |
2714 | }; |
2715 | |
	task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
	if (!IS_ERR(task)) {
		init_idle_pids(task);
		init_idle(task, cpu);
2720 | } |
2721 | |
2722 | return task; |
2723 | } |
2724 | |
2725 | /* |
2726 | * This is like kernel_clone(), but shaved down and tailored to just |
2727 | * creating io_uring workers. It returns a created task, or an error pointer. |
2728 | * The returned task is inactive, and the caller must fire it up through |
2729 | * wake_up_new_task(p). All signals are blocked in the created task. |
2730 | */ |
2731 | struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) |
2732 | { |
2733 | unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| |
2734 | CLONE_IO; |
2735 | struct kernel_clone_args args = { |
2736 | .flags = ((lower_32_bits(flags) | CLONE_VM | |
2737 | CLONE_UNTRACED) & ~CSIGNAL), |
2738 | .exit_signal = (lower_32_bits(flags) & CSIGNAL), |
2739 | .fn = fn, |
2740 | .fn_arg = arg, |
2741 | .io_thread = 1, |
2742 | .user_worker = 1, |
2743 | }; |
2744 | |
	return copy_process(NULL, 0, node, &args);
2746 | } |
2747 | |
2748 | /* |
2749 | * Ok, this is the main fork-routine. |
2750 | * |
2751 | * It copies the process, and if successful kick-starts |
2752 | * it and waits for it to finish using the VM if required. |
2753 | * |
2754 | * args->exit_signal is expected to be checked for sanity by the caller. |
2755 | */ |
2756 | pid_t kernel_clone(struct kernel_clone_args *args) |
2757 | { |
2758 | u64 clone_flags = args->flags; |
2759 | struct completion vfork; |
2760 | struct pid *pid; |
2761 | struct task_struct *p; |
2762 | int trace = 0; |
2763 | pid_t nr; |
2764 | |
2765 | /* |
2766 | * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument |
2767 | * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are |
2768 | * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate |
2769 | * field in struct clone_args and it still doesn't make sense to have |
2770 | * them both point at the same memory location. Performing this check |
2771 | * here has the advantage that we don't need to have a separate helper |
2772 | * to check for legacy clone(). |
2773 | */ |
2774 | if ((clone_flags & CLONE_PIDFD) && |
2775 | (clone_flags & CLONE_PARENT_SETTID) && |
2776 | (args->pidfd == args->parent_tid)) |
2777 | return -EINVAL; |
2778 | |
2779 | /* |
2780 | * Determine whether and which event to report to ptracer. When |
2781 | * called from kernel_thread or CLONE_UNTRACED is explicitly |
2782 | * requested, no event is reported; otherwise, report if the event |
2783 | * for the type of forking is enabled. |
2784 | */ |
2785 | if (!(clone_flags & CLONE_UNTRACED)) { |
2786 | if (clone_flags & CLONE_VFORK) |
2787 | trace = PTRACE_EVENT_VFORK; |
2788 | else if (args->exit_signal != SIGCHLD) |
2789 | trace = PTRACE_EVENT_CLONE; |
2790 | else |
2791 | trace = PTRACE_EVENT_FORK; |
2792 | |
2793 | if (likely(!ptrace_event_enabled(current, trace))) |
2794 | trace = 0; |
2795 | } |
2796 | |
2797 | p = copy_process(NULL, trace, NUMA_NO_NODE, args); |
2798 | add_latent_entropy(); |
2799 | |
	if (IS_ERR(p))
		return PTR_ERR(p);
2802 | |
2803 | /* |
2804 | * Do this prior waking up the new thread - the thread pointer |
2805 | * might get invalid after that point, if the thread exits quickly. |
2806 | */ |
	trace_sched_process_fork(current, p);
2808 | |
	pid = get_task_pid(p, PIDTYPE_PID);
2810 | nr = pid_vnr(pid); |
2811 | |
2812 | if (clone_flags & CLONE_PARENT_SETTID) |
2813 | put_user(nr, args->parent_tid); |
2814 | |
2815 | if (clone_flags & CLONE_VFORK) { |
2816 | p->vfork_done = &vfork; |
		init_completion(&vfork);
		get_task_struct(p);
2819 | } |
2820 | |
2821 | if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) { |
2822 | /* lock the task to synchronize with memcg migration */ |
2823 | task_lock(p); |
		lru_gen_add_mm(p->mm);
2825 | task_unlock(p); |
2826 | } |
2827 | |
	wake_up_new_task(p);
2829 | |
2830 | /* forking complete and child started to run, tell ptracer */ |
2831 | if (unlikely(trace)) |
		ptrace_event_pid(trace, pid);
2833 | |
2834 | if (clone_flags & CLONE_VFORK) { |
		if (!wait_for_vfork_done(p, &vfork))
2836 | ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); |
2837 | } |
2838 | |
2839 | put_pid(pid); |
2840 | return nr; |
2841 | } |
2842 | |
2843 | /* |
2844 | * Create a kernel thread. |
2845 | */ |
2846 | pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name, |
2847 | unsigned long flags) |
2848 | { |
2849 | struct kernel_clone_args args = { |
2850 | .flags = ((lower_32_bits(flags) | CLONE_VM | |
2851 | CLONE_UNTRACED) & ~CSIGNAL), |
2852 | .exit_signal = (lower_32_bits(flags) & CSIGNAL), |
2853 | .fn = fn, |
2854 | .fn_arg = arg, |
2855 | .name = name, |
2856 | .kthread = 1, |
2857 | }; |
2858 | |
	return kernel_clone(&args);
2860 | } |
2861 | |
2862 | /* |
2863 | * Create a user mode thread. |
2864 | */ |
2865 | pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags) |
2866 | { |
2867 | struct kernel_clone_args args = { |
2868 | .flags = ((lower_32_bits(flags) | CLONE_VM | |
2869 | CLONE_UNTRACED) & ~CSIGNAL), |
2870 | .exit_signal = (lower_32_bits(flags) & CSIGNAL), |
2871 | .fn = fn, |
2872 | .fn_arg = arg, |
2873 | }; |
2874 | |
	return kernel_clone(&args);
2876 | } |
2877 | |
2878 | #ifdef __ARCH_WANT_SYS_FORK |
2879 | SYSCALL_DEFINE0(fork) |
2880 | { |
2881 | #ifdef CONFIG_MMU |
2882 | struct kernel_clone_args args = { |
2883 | .exit_signal = SIGCHLD, |
2884 | }; |
2885 | |
	return kernel_clone(&args);
2887 | #else |
	/* cannot be supported in nommu mode */
2889 | return -EINVAL; |
2890 | #endif |
2891 | } |
2892 | #endif |
2893 | |
2894 | #ifdef __ARCH_WANT_SYS_VFORK |
2895 | SYSCALL_DEFINE0(vfork) |
2896 | { |
2897 | struct kernel_clone_args args = { |
2898 | .flags = CLONE_VFORK | CLONE_VM, |
2899 | .exit_signal = SIGCHLD, |
2900 | }; |
2901 | |
	return kernel_clone(&args);
2903 | } |
2904 | #endif |
2905 | |
2906 | #ifdef __ARCH_WANT_SYS_CLONE |
2907 | #ifdef CONFIG_CLONE_BACKWARDS |
2908 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, |
2909 | int __user *, parent_tidptr, |
2910 | unsigned long, tls, |
2911 | int __user *, child_tidptr) |
2912 | #elif defined(CONFIG_CLONE_BACKWARDS2) |
2913 | SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, |
2914 | int __user *, parent_tidptr, |
2915 | int __user *, child_tidptr, |
2916 | unsigned long, tls) |
2917 | #elif defined(CONFIG_CLONE_BACKWARDS3) |
2918 | SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, |
2919 | int, stack_size, |
2920 | int __user *, parent_tidptr, |
2921 | int __user *, child_tidptr, |
2922 | unsigned long, tls) |
2923 | #else |
2924 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, |
2925 | int __user *, parent_tidptr, |
2926 | int __user *, child_tidptr, |
2927 | unsigned long, tls) |
2928 | #endif |
2929 | { |
2930 | struct kernel_clone_args args = { |
2931 | .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), |
2932 | .pidfd = parent_tidptr, |
2933 | .child_tid = child_tidptr, |
2934 | .parent_tid = parent_tidptr, |
2935 | .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), |
2936 | .stack = newsp, |
2937 | .tls = tls, |
2938 | }; |
2939 | |
	return kernel_clone(&args);
2941 | } |
2942 | #endif |
2943 | |
2944 | #ifdef __ARCH_WANT_SYS_CLONE3 |
2945 | |
2946 | noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, |
2947 | struct clone_args __user *uargs, |
2948 | size_t usize) |
2949 | { |
2950 | int err; |
2951 | struct clone_args args; |
2952 | pid_t *kset_tid = kargs->set_tid; |
2953 | |
2954 | BUILD_BUG_ON(offsetofend(struct clone_args, tls) != |
2955 | CLONE_ARGS_SIZE_VER0); |
2956 | BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) != |
2957 | CLONE_ARGS_SIZE_VER1); |
2958 | BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) != |
2959 | CLONE_ARGS_SIZE_VER2); |
2960 | BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2); |
2961 | |
2962 | if (unlikely(usize > PAGE_SIZE)) |
2963 | return -E2BIG; |
2964 | if (unlikely(usize < CLONE_ARGS_SIZE_VER0)) |
2965 | return -EINVAL; |
2966 | |
	err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
2968 | if (err) |
2969 | return err; |
2970 | |
2971 | if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL)) |
2972 | return -EINVAL; |
2973 | |
2974 | if (unlikely(!args.set_tid && args.set_tid_size > 0)) |
2975 | return -EINVAL; |
2976 | |
2977 | if (unlikely(args.set_tid && args.set_tid_size == 0)) |
2978 | return -EINVAL; |
2979 | |
2980 | /* |
	 * Verify that the high 32 bits of exit_signal are unset and that
	 * it is a valid signal
2983 | */ |
2984 | if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) || |
2985 | !valid_signal(args.exit_signal))) |
2986 | return -EINVAL; |
2987 | |
2988 | if ((args.flags & CLONE_INTO_CGROUP) && |
2989 | (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2)) |
2990 | return -EINVAL; |
2991 | |
2992 | *kargs = (struct kernel_clone_args){ |
2993 | .flags = args.flags, |
2994 | .pidfd = u64_to_user_ptr(args.pidfd), |
2995 | .child_tid = u64_to_user_ptr(args.child_tid), |
2996 | .parent_tid = u64_to_user_ptr(args.parent_tid), |
2997 | .exit_signal = args.exit_signal, |
2998 | .stack = args.stack, |
2999 | .stack_size = args.stack_size, |
3000 | .tls = args.tls, |
3001 | .set_tid_size = args.set_tid_size, |
3002 | .cgroup = args.cgroup, |
3003 | }; |
3004 | |
3005 | if (args.set_tid && |
	    copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
			   (kargs->set_tid_size * sizeof(pid_t))))
3008 | return -EFAULT; |
3009 | |
3010 | kargs->set_tid = kset_tid; |
3011 | |
3012 | return 0; |
3013 | } |
3014 | |
3015 | /** |
3016 | * clone3_stack_valid - check and prepare stack |
3017 | * @kargs: kernel clone args |
3018 | * |
3019 | * Verify that the stack arguments userspace gave us are sane. |
3020 | * In addition, set the stack direction for userspace since it's easy for us to |
3021 | * determine. |
3022 | */ |
3023 | static inline bool clone3_stack_valid(struct kernel_clone_args *kargs) |
3024 | { |
3025 | if (kargs->stack == 0) { |
3026 | if (kargs->stack_size > 0) |
3027 | return false; |
3028 | } else { |
3029 | if (kargs->stack_size == 0) |
3030 | return false; |
3031 | |
3032 | if (!access_ok((void __user *)kargs->stack, kargs->stack_size)) |
3033 | return false; |
3034 | |
3035 | #if !defined(CONFIG_STACK_GROWSUP) |
3036 | kargs->stack += kargs->stack_size; |
3037 | #endif |
3038 | } |
3039 | |
3040 | return true; |
3041 | } |
3042 | |
3043 | static bool clone3_args_valid(struct kernel_clone_args *kargs) |
3044 | { |
3045 | /* Verify that no unknown flags are passed along. */ |
3046 | if (kargs->flags & |
3047 | ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP)) |
3048 | return false; |
3049 | |
3050 | /* |
3051 | * - make the CLONE_DETACHED bit reusable for clone3 |
3052 | * - make the CSIGNAL bits reusable for clone3 |
3053 | */ |
3054 | if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME)))) |
3055 | return false; |
3056 | |
3057 | if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) == |
3058 | (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) |
3059 | return false; |
3060 | |
3061 | if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) && |
3062 | kargs->exit_signal) |
3063 | return false; |
3064 | |
3065 | if (!clone3_stack_valid(kargs)) |
3066 | return false; |
3067 | |
3068 | return true; |
3069 | } |
3070 | |
3071 | /** |
3072 | * sys_clone3 - create a new process with specific properties |
3073 | * @uargs: argument structure |
3074 | * @size: size of @uargs |
3075 | * |
3076 | * clone3() is the extensible successor to clone()/clone2(). |
3077 | * It takes a struct as argument that is versioned by its size. |
3078 | * |
3079 | * Return: On success, a positive PID for the child process. |
3080 | * On error, a negative errno number. |
3081 | */ |
3082 | SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size) |
3083 | { |
3084 | int err; |
3085 | |
3086 | struct kernel_clone_args kargs; |
3087 | pid_t set_tid[MAX_PID_NS_LEVEL]; |
3088 | |
3089 | kargs.set_tid = set_tid; |
3090 | |
	err = copy_clone_args_from_user(&kargs, uargs, size);
	if (err)
		return err;

	if (!clone3_args_valid(&kargs))
		return -EINVAL;

	return kernel_clone(&kargs);
3099 | } |
3100 | #endif |
3101 | |
3102 | void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) |
3103 | { |
3104 | struct task_struct *leader, *parent, *child; |
3105 | int res; |
3106 | |
3107 | read_lock(&tasklist_lock); |
3108 | leader = top = top->group_leader; |
3109 | down: |
3110 | for_each_thread(leader, parent) { |
3111 | list_for_each_entry(child, &parent->children, sibling) { |
3112 | res = visitor(child, data); |
3113 | if (res) { |
3114 | if (res < 0) |
3115 | goto out; |
3116 | leader = child; |
3117 | goto down; |
3118 | } |
3119 | up: |
3120 | ; |
3121 | } |
3122 | } |
3123 | |
3124 | if (leader != top) { |
3125 | child = leader; |
3126 | parent = child->real_parent; |
3127 | leader = parent->group_leader; |
3128 | goto up; |
3129 | } |
3130 | out: |
3131 | read_unlock(&tasklist_lock); |
3132 | } |
3133 | |
3134 | #ifndef ARCH_MIN_MMSTRUCT_ALIGN |
3135 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 |
3136 | #endif |
3137 | |
3138 | static void sighand_ctor(void *data) |
3139 | { |
3140 | struct sighand_struct *sighand = data; |
3141 | |
3142 | spin_lock_init(&sighand->siglock); |
3143 | init_waitqueue_head(&sighand->signalfd_wqh); |
3144 | } |
3145 | |
3146 | void __init mm_cache_init(void) |
3147 | { |
3148 | unsigned int mm_size; |
3149 | |
3150 | /* |
3151 | * The mm_cpumask is located at the end of mm_struct, and is |
3152 | * dynamically sized based on the maximum CPU number this system |
3153 | * can have, taking hotplug into account (nr_cpu_ids). |
3154 | */ |
3155 | mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size(); |
3156 | |
	mm_cachep = kmem_cache_create_usercopy("mm_struct",
			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
3159 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, |
3160 | offsetof(struct mm_struct, saved_auxv), |
3161 | sizeof_field(struct mm_struct, saved_auxv), |
3162 | NULL); |
3163 | } |
3164 | |
3165 | void __init proc_caches_init(void) |
3166 | { |
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
3183 | |
3184 | vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); |
3185 | #ifdef CONFIG_PER_VMA_LOCK |
3186 | vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT); |
3187 | #endif |
3188 | mmap_init(); |
3189 | nsproxy_cache_init(); |
3190 | } |
3191 | |
3192 | /* |
3193 | * Check constraints on flags passed to the unshare system call. |
3194 | */ |
3195 | static int check_unshare_flags(unsigned long unshare_flags) |
3196 | { |
3197 | if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| |
3198 | CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| |
3199 | CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| |
3200 | CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP| |
3201 | CLONE_NEWTIME)) |
3202 | return -EINVAL; |
3203 | /* |
3204 | * Not implemented, but pretend it works if there is nothing |
	 * to unshare. Note that unsharing the address space or the
	 * signal handlers also requires unsharing the signal queues
	 * (aka CLONE_THREAD).
3208 | */ |
3209 | if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { |
3210 | if (!thread_group_empty(current)) |
3211 | return -EINVAL; |
3212 | } |
3213 | if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { |
		if (refcount_read(&current->sighand->count) > 1)
3215 | return -EINVAL; |
3216 | } |
3217 | if (unshare_flags & CLONE_VM) { |
3218 | if (!current_is_single_threaded()) |
3219 | return -EINVAL; |
3220 | } |
3221 | |
3222 | return 0; |
3223 | } |
3224 | |
3225 | /* |
3226 | * Unshare the filesystem structure if it is being shared |
3227 | */ |
3228 | static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) |
3229 | { |
3230 | struct fs_struct *fs = current->fs; |
3231 | |
3232 | if (!(unshare_flags & CLONE_FS) || !fs) |
3233 | return 0; |
3234 | |
3235 | /* don't need lock here; in the worst case we'll do useless copy */ |
3236 | if (fs->users == 1) |
3237 | return 0; |
3238 | |
3239 | *new_fsp = copy_fs_struct(fs); |
3240 | if (!*new_fsp) |
3241 | return -ENOMEM; |
3242 | |
3243 | return 0; |
3244 | } |
3245 | |
3246 | /* |
3247 | * Unshare file descriptor table if it is being shared |
3248 | */ |
3249 | int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, |
3250 | struct files_struct **new_fdp) |
3251 | { |
3252 | struct files_struct *fd = current->files; |
3253 | int error = 0; |
3254 | |
3255 | if ((unshare_flags & CLONE_FILES) && |
	    (fd && atomic_read(&fd->count) > 1)) {
3257 | *new_fdp = dup_fd(fd, max_fds, &error); |
3258 | if (!*new_fdp) |
3259 | return error; |
3260 | } |
3261 | |
3262 | return 0; |
3263 | } |
3264 | |
3265 | /* |
3266 | * unshare allows a process to 'unshare' part of the process |
3267 | * context which was originally shared using clone. copy_* |
3268 | * functions used by kernel_clone() cannot be used here directly |
3269 | * because they modify an inactive task_struct that is being |
3270 | * constructed. Here we are modifying the current, active, |
3271 | * task_struct. |
3272 | */ |
3273 | int ksys_unshare(unsigned long unshare_flags) |
3274 | { |
3275 | struct fs_struct *fs, *new_fs = NULL; |
3276 | struct files_struct *new_fd = NULL; |
3277 | struct cred *new_cred = NULL; |
3278 | struct nsproxy *new_nsproxy = NULL; |
3279 | int do_sysvsem = 0; |
3280 | int err; |
3281 | |
3282 | /* |
	 * If unsharing a user namespace, we must also unshare the thread group
	 * and the filesystem root and working directories.
3285 | */ |
3286 | if (unshare_flags & CLONE_NEWUSER) |
3287 | unshare_flags |= CLONE_THREAD | CLONE_FS; |
3288 | /* |
	 * If unsharing the VM, we must also unshare signal handlers.
3290 | */ |
3291 | if (unshare_flags & CLONE_VM) |
3292 | unshare_flags |= CLONE_SIGHAND; |
3293 | /* |
	 * If unsharing signal handlers, we must also unshare the signal queues.
3295 | */ |
3296 | if (unshare_flags & CLONE_SIGHAND) |
3297 | unshare_flags |= CLONE_THREAD; |
3298 | /* |
	 * If unsharing the mount namespace, we must also unshare filesystem information.
3300 | */ |
3301 | if (unshare_flags & CLONE_NEWNS) |
3302 | unshare_flags |= CLONE_FS; |
3303 | |
3304 | err = check_unshare_flags(unshare_flags); |
3305 | if (err) |
3306 | goto bad_unshare_out; |
3307 | /* |
3308 | * CLONE_NEWIPC must also detach from the undolist: after switching |
3309 | * to a new ipc namespace, the semaphore arrays from the old |
3310 | * namespace are unreachable. |
3311 | */ |
3312 | if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) |
3313 | do_sysvsem = 1; |
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
3321 | if (err) |
3322 | goto bad_unshare_cleanup_fd; |
3323 | err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, |
3324 | new_cred, new_fs); |
3325 | if (err) |
3326 | goto bad_unshare_cleanup_cred; |
3327 | |
3328 | if (new_cred) { |
3329 | err = set_cred_ucounts(new_cred); |
3330 | if (err) |
3331 | goto bad_unshare_cleanup_cred; |
3332 | } |
3333 | |
3334 | if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { |
3335 | if (do_sysvsem) { |
3336 | /* |
3337 | * CLONE_SYSVSEM is equivalent to sys_exit(). |
3338 | */ |
3339 | exit_sem(current); |
3340 | } |
3341 | if (unshare_flags & CLONE_NEWIPC) { |
3342 | /* Orphan segments in old ns (see sem above). */ |
3343 | exit_shm(current); |
3344 | shm_init_task(current); |
3345 | } |
3346 | |
3347 | if (new_nsproxy) |
			switch_task_namespaces(current, new_nsproxy);
3349 | |
3350 | task_lock(current); |
3351 | |
3352 | if (new_fs) { |
3353 | fs = current->fs; |
			spin_lock(&fs->lock);
3355 | current->fs = new_fs; |
3356 | if (--fs->users) |
3357 | new_fs = NULL; |
3358 | else |
3359 | new_fs = fs; |
			spin_unlock(&fs->lock);
3361 | } |
3362 | |
3363 | if (new_fd) |
3364 | swap(current->files, new_fd); |
3365 | |
3366 | task_unlock(current); |
3367 | |
3368 | if (new_cred) { |
3369 | /* Install the new user namespace */ |
3370 | commit_creds(new_cred); |
3371 | new_cred = NULL; |
3372 | } |
3373 | } |
3374 | |
3375 | perf_event_namespaces(current); |
3376 | |
3377 | bad_unshare_cleanup_cred: |
3378 | if (new_cred) |
		put_cred(new_cred);
3380 | bad_unshare_cleanup_fd: |
3381 | if (new_fd) |
		put_files_struct(new_fd);
3383 | |
3384 | bad_unshare_cleanup_fs: |
3385 | if (new_fs) |
3386 | free_fs_struct(new_fs); |
3387 | |
3388 | bad_unshare_out: |
3389 | return err; |
3390 | } |
3391 | |
3392 | SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) |
3393 | { |
3394 | return ksys_unshare(unshare_flags); |
3395 | } |
3396 | |
3397 | /* |
3398 | * Helper to unshare the files of the current task. |
3399 | * We don't want to expose copy_files internals to |
3400 | * the exec layer of the kernel. |
3401 | */ |
3402 | |
3403 | int unshare_files(void) |
3404 | { |
3405 | struct task_struct *task = current; |
3406 | struct files_struct *old, *copy = NULL; |
3407 | int error; |
3408 | |
	error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
3410 | if (error || !copy) |
3411 | return error; |
3412 | |
3413 | old = task->files; |
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	put_files_struct(old);
3418 | return 0; |
3419 | } |
3420 | |
3421 | int sysctl_max_threads(struct ctl_table *table, int write, |
3422 | void *buffer, size_t *lenp, loff_t *ppos) |
3423 | { |
3424 | struct ctl_table t; |
3425 | int ret; |
3426 | int threads = max_threads; |
3427 | int min = 1; |
3428 | int max = MAX_THREADS; |
3429 | |
3430 | t = *table; |
3431 | t.data = &threads; |
3432 | t.extra1 = &min; |
3433 | t.extra2 = &max; |
3434 | |
3435 | ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); |
3436 | if (ret || !write) |
3437 | return ret; |
3438 | |
3439 | max_threads = threads; |
3440 | |
3441 | return 0; |
3442 | } |
3443 | |