// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst-case
 * allocation scenario, when all but one of the roughly one million possible
 * PIDs are already allocated, allocation scans 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful set-bit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <linux/pidfs.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>

struct pid init_struct_pid = {
        .count = REFCOUNT_INIT(1),
        .tasks = {
                { .first = NULL },
                { .first = NULL },
                { .first = NULL },
        },
        .level = 0,
        .numbers = { {
                .nr = 0,
                .ns = &init_pid_ns,
        }, }
};

int pid_max = PID_MAX_DEFAULT;

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;
/*
 * Pseudo filesystems start inode numbering after one. We use Reserved
 * PIDs as a natural offset.
 */
static u64 pidfs_ino = RESERVED_PIDS;

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
        .ns.count = REFCOUNT_INIT(2),
        .idr = IDR_INIT(init_pid_ns.idr),
        .pid_allocated = PIDNS_ADDING,
        .level = 0,
        .child_reaper = &init_task,
        .user_ns = &init_user_ns,
        .ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
        .ns.ops = &pidns_operations,
#endif
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
        .memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock).
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
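/*
 * A minimal sketch of the feared interleaving (illustrative only; the bare
 * spin_lock() on CPU1 stands for any hypothetical path that took
 * pidmap_lock without disabling interrupts):
 *
 *      CPU0                                    CPU1
 *      ----                                    ----
 *      write_lock_irq(&tasklist_lock);
 *      detach_pid() -> free_pid():             spin_lock(&pidmap_lock);
 *        spin_lock_irqsave(&pidmap_lock);      <interrupt>
 *          ... spins, CPU1 holds it              read_lock(&tasklist_lock);
 *                                                ... blocks, CPU0 holds it
 *
 * Neither CPU can make progress, hence the _irq/_irqsave variants below.
 */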
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
        struct pid_namespace *ns;

        if (!pid)
                return;

        ns = pid->numbers[pid->level].ns;
        if (refcount_dec_and_test(&pid->count)) {
                kmem_cache_free(ns->pid_cachep, pid);
                put_pid_ns(ns);
        }
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
        struct pid *pid = container_of(rhp, struct pid, rcu);
        put_pid(pid);
}

void free_pid(struct pid *pid)
{
        /* We can be called with write_lock_irq(&tasklist_lock) held */
        int i;
        unsigned long flags;

        spin_lock_irqsave(&pidmap_lock, flags);
        for (i = 0; i <= pid->level; i++) {
                struct upid *upid = pid->numbers + i;
                struct pid_namespace *ns = upid->ns;
                switch (--ns->pid_allocated) {
                case 2:
                case 1:
                        /* When all that is left in the pid namespace
                         * is the reaper, wake up the reaper. The reaper
                         * may be sleeping in zap_pid_ns_processes().
                         */
                        wake_up_process(ns->child_reaper);
                        break;
                case PIDNS_ADDING:
                        /* Handle a fork failure of the first process */
                        WARN_ON(ns->child_reaper);
                        ns->pid_allocated = 0;
                        break;
                }

                idr_remove(&ns->idr, upid->nr);
        }
        spin_unlock_irqrestore(&pidmap_lock, flags);

        call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                      size_t set_tid_size)
{
        struct pid *pid;
        enum pid_type type;
        int i, nr;
        struct pid_namespace *tmp;
        struct upid *upid;
        int retval = -ENOMEM;

        /*
         * set_tid_size contains the size of the set_tid array. Starting at
         * the most nested currently active PID namespace it tells alloc_pid()
         * which PID to set for a process in that most nested PID namespace
         * up to set_tid_size PID namespaces. It does not have to set the PID
         * for a process in all nested PID namespaces but set_tid_size must
         * never be greater than the current ns->level + 1.
         */
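        /*
         * Illustrative example (not from the original source): for a task
         * created with ns->level == 2, passing set_tid_size == 3 requests
         * every PID explicitly: set_tid[0] is used at level 2 (the most
         * nested namespace), set_tid[1] at level 1 and set_tid[2] at
         * level 0, matching the set_tid[ns->level - i] indexing in the
         * loop below.
         */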
        if (set_tid_size > ns->level + 1)
                return ERR_PTR(-EINVAL);

        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
                return ERR_PTR(retval);

        tmp = ns;
        pid->level = ns->level;

        for (i = ns->level; i >= 0; i--) {
                int tid = 0;

                if (set_tid_size) {
                        tid = set_tid[ns->level - i];

                        retval = -EINVAL;
                        if (tid < 1 || tid >= pid_max)
                                goto out_free;
                        /*
                         * Also fail if a PID != 1 is requested and
                         * no PID 1 exists.
                         */
                        if (tid != 1 && !tmp->child_reaper)
                                goto out_free;
                        retval = -EPERM;
                        if (!checkpoint_restore_ns_capable(tmp->user_ns))
                                goto out_free;
                        set_tid_size--;
                }

                idr_preload(GFP_KERNEL);
                spin_lock_irq(&pidmap_lock);

                if (tid) {
                        nr = idr_alloc(&tmp->idr, NULL, tid,
                                       tid + 1, GFP_ATOMIC);
                        /*
                         * If ENOSPC is returned it means that the PID is
                         * already in use. Return EEXIST in that case.
                         */
                        if (nr == -ENOSPC)
                                nr = -EEXIST;
                } else {
                        int pid_min = 1;
                        /*
                         * init really needs pid 1, but after reaching the
                         * maximum wrap back to RESERVED_PIDS
                         */
                        if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
                                pid_min = RESERVED_PIDS;

                        /*
                         * Store a null pointer so find_pid_ns does not find
                         * a partially initialized PID (see below).
                         */
                        nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
                                              pid_max, GFP_ATOMIC);
                }
                spin_unlock_irq(&pidmap_lock);
                idr_preload_end();

                if (nr < 0) {
                        retval = (nr == -ENOSPC) ? -EAGAIN : nr;
                        goto out_free;
                }

                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
                tmp = tmp->parent;
        }

        /*
         * ENOMEM is not the most obvious choice especially for the case
         * where the child subreaper has already exited and the pid
         * namespace denies the creation of any new processes. But ENOMEM
         * is what we have exposed to userspace for a long time and it is
         * documented behavior for pid namespaces. So we can't easily
         * change it even if there were an error code better suited.
         */
        retval = -ENOMEM;

        get_pid_ns(ns);
        refcount_set(&pid->count, 1);
        spin_lock_init(&pid->lock);
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        init_waitqueue_head(&pid->wait_pidfd);
        INIT_HLIST_HEAD(&pid->inodes);

        upid = pid->numbers + ns->level;
        spin_lock_irq(&pidmap_lock);
        if (!(ns->pid_allocated & PIDNS_ADDING))
                goto out_unlock;
        pid->stashed = NULL;
        pid->ino = ++pidfs_ino;
        for ( ; upid >= pid->numbers; --upid) {
                /* Make the PID visible to find_pid_ns. */
                idr_replace(&upid->ns->idr, pid, upid->nr);
                upid->ns->pid_allocated++;
        }
        spin_unlock_irq(&pidmap_lock);

        return pid;

out_unlock:
        spin_unlock_irq(&pidmap_lock);
        put_pid_ns(ns);

out_free:
        spin_lock_irq(&pidmap_lock);
        while (++i <= ns->level) {
                upid = pid->numbers + i;
                idr_remove(&upid->ns->idr, upid->nr);
        }

        /* On failure to allocate the first pid, reset the state */
        if (ns->pid_allocated == PIDNS_ADDING)
                idr_set_cursor(&ns->idr, 0);

        spin_unlock_irq(&pidmap_lock);

        kmem_cache_free(ns->pid_cachep, pid);
        return ERR_PTR(retval);
}

void disable_pid_allocation(struct pid_namespace *ns)
{
        spin_lock_irq(&pidmap_lock);
        ns->pid_allocated &= ~PIDNS_ADDING;
        spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
        return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
        return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

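/*
 * Descriptive note (added for clarity): PIDTYPE_PID is per-thread and lives
 * in task_struct, while the TGID/PGID/SID pids are shared by the whole
 * thread group and live in signal_struct.
 */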
static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
        return (type == PIDTYPE_PID) ?
                &task->thread_pid :
                &task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid = *task_pid_ptr(task, type);
        hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
                         struct pid *new)
{
        struct pid **pid_ptr = task_pid_ptr(task, type);
        struct pid *pid;
        int tmp;

        pid = *pid_ptr;

        hlist_del_rcu(&task->pid_links[type]);
        *pid_ptr = new;

        if (type == PIDTYPE_PID) {
                WARN_ON_ONCE(pid_has_task(pid, PIDTYPE_PID));
                wake_up_all(&pid->wait_pidfd);
        }

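        /* Only free the pid once no pid_type has a task hanging off it. */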
        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (pid_has_task(pid, tmp))
                        return;

        free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
        __change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
                struct pid *pid)
{
        __change_pid(task, type, pid);
        attach_pid(task, type);
}

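/*
 * Descriptive note (added for clarity): swaps the PIDTYPE_PID pids of two
 * tasks; used for example by de_thread() during exec when a non-leader
 * thread takes over the group leader's tid (see fs/exec.c).
 */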
void exchange_tids(struct task_struct *left, struct task_struct *right)
{
        struct pid *pid1 = left->thread_pid;
        struct pid *pid2 = right->thread_pid;
        struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
        struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

        /* Swap the single entry tid lists */
        hlists_swap_heads_rcu(head1, head2);

        /* Swap the per task_struct pid */
        rcu_assign_pointer(left->thread_pid, pid2);
        rcu_assign_pointer(right->thread_pid, pid1);

        /* Swap the cached value */
        WRITE_ONCE(left->pid, pid_nr(pid2));
        WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
                  enum pid_type type)
{
        WARN_ON_ONCE(type == PIDTYPE_PID);
        hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
                first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
                                              lockdep_tasklist_lock_is_held());
                if (first)
                        result = hlist_entry(first, struct task_struct, pid_links[(type)]);
        }
        return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "find_task_by_pid_ns() needs rcu_read_lock() protection");
        return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
        return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
        struct task_struct *task;

        rcu_read_lock();
        task = find_task_by_vpid(nr);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();

        return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;
        rcu_read_lock();
        pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
        rcu_read_unlock();
        return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result;
        rcu_read_lock();
        result = pid_task(pid, type);
        if (result)
                get_task_struct(result);
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(find_vpid(nr));
        rcu_read_unlock();

        return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

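/*
 * Descriptive note (added for clarity): a pid allocated at level N carries
 * N + 1 upids, numbers[0] for the init namespace up to numbers[N] for the
 * most nested one. pid_nr_ns() returns the number the pid has in @ns, or 0
 * when the pid is not visible there.
 */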
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
        struct upid *upid;
        pid_t nr = 0;

        if (pid && ns->level <= pid->level) {
                upid = &pid->numbers[ns->level];
                if (upid->ns == ns)
                        nr = upid->nr;
        }
        return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
        return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
                       struct pid_namespace *ns)
{
        pid_t nr = 0;

        rcu_read_lock();
        if (!ns)
                ns = task_active_pid_ns(current);
        nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
        rcu_read_unlock();

        return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
        return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
        return idr_get_next(&ns->idr, &nr);
}
EXPORT_SYMBOL_GPL(find_ge_pid);

struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
        struct fd f;
        struct pid *pid;

        f = fdget(fd);
        if (!f.file)
                return ERR_PTR(-EBADF);

        pid = pidfd_pid(f.file);
        if (!IS_ERR(pid)) {
                get_pid(pid);
                *flags = f.file->f_flags;
        }

        fdput(f);
        return pid;
}

/**
 * pidfd_get_task() - Get the task associated with a pidfd
 *
 * @pidfd: pidfd for which to get the task
 * @flags: flags associated with this pidfd
 *
 * Return the task associated with @pidfd. The function takes a reference on
 * the returned task. The caller is responsible for releasing that reference.
 *
 * Return: On success, the task_struct associated with the pidfd.
 *         On error, a negative errno number will be returned.
 */
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
{
        unsigned int f_flags;
        struct pid *pid;
        struct task_struct *task;

        pid = pidfd_get_pid(pidfd, &f_flags);
        if (IS_ERR(pid))
                return ERR_CAST(pid);

        task = get_pid_task(pid, PIDTYPE_TGID);
        put_pid(pid);
        if (!task)
                return ERR_PTR(-ESRCH);

        *flags = f_flags;
        return task;
}
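/*
 * Illustrative caller pattern for pidfd_get_task() (a sketch, not from the
 * original source; error handling abbreviated):
 *
 *      unsigned int f_flags;
 *      struct task_struct *task = pidfd_get_task(pidfd, &f_flags);
 *
 *      if (IS_ERR(task))
 *              return PTR_ERR(task);
 *      // ... use task ...
 *      put_task_struct(task);
 */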

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:   struct pid that the pidfd will reference
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * This symbol should not be explicitly exported to loadable modules.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid, unsigned int flags)
{
        int pidfd;
        struct file *pidfd_file;

        pidfd = pidfd_prepare(pid, flags, &pidfd_file);
        if (pidfd < 0)
                return pidfd;

        fd_install(pidfd, pidfd_file);
        return pidfd;
}

/**
 * sys_pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the task identified by @pid. Without the PIDFD_THREAD flag the target
 * task must be a thread-group leader.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
        int fd;
        struct pid *p;

        if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD))
                return -EINVAL;

        if (pid <= 0)
                return -EINVAL;

        p = find_get_pid(pid);
        if (!p)
                return -ESRCH;

        fd = pidfd_create(p, flags);

        put_pid(p);
        return fd;
}
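/*
 * Illustrative userspace usage (a sketch, assuming the libc syscall(2)
 * wrapper and a kernel with pidfd support; error handling elided):
 *
 *      int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *      struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *      poll(&pfd, 1, -1);      // pidfd becomes readable when the process exits
 *      close(pidfd);
 */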

void __init pid_idr_init(void)
{
        /* Verify no one has done anything silly: */
        BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

        /* bump default and minimum pid_max based on number of cpus */
        pid_max = min(pid_max_max, max_t(int, pid_max,
                                PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
        pid_max_min = max_t(int, pid_max_min,
                                PIDS_PER_CPU_MIN * num_possible_cpus());
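        /*
         * Worked example (illustrative; constants as defined in
         * <linux/threads.h> at the time of writing): with
         * PIDS_PER_CPU_DEFAULT == 1024 and 64 possible CPUs, pid_max is
         * raised from PID_MAX_DEFAULT (32768) to 65536, still clamped
         * to pid_max_max.
         */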
        pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

        idr_init(&init_pid_ns.idr);

        init_pid_ns.pid_cachep = kmem_cache_create("pid",
                        struct_size_t(struct pid, numbers, 1),
                        __alignof__(struct pid),
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
                        NULL);
}

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
        struct file *file;
        int ret;

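        /*
         * Descriptive note (added for clarity): holding exec_update_lock
         * keeps the ptrace_may_access() check from racing with a concurrent
         * exec that changes the task's credentials.
         */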
        ret = down_read_killable(&task->signal->exec_update_lock);
        if (ret)
                return ERR_PTR(ret);

        if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
                file = fget_task(task, fd);
        else
                file = ERR_PTR(-EPERM);

        up_read(&task->signal->exec_update_lock);

        if (!file) {
                /*
                 * It is possible that the target thread is exiting; it can be
                 * one of:
                 * 1. before exit_signals(), which gives a real fd
                 * 2. after exit_signals() but before exit_files() takes
                 *    task_lock(), which also gives a real fd
                 * 3. after exit_files() releases task_lock(): ->files is NULL
                 *    and the task has PF_EXITING, set in exit_signals(), so
                 *    fget_task() returns NULL.
                 * In case 3 we would get EBADF, but that really means ESRCH,
                 * since the task is currently exiting and has freed its files
                 * struct, so we fix it up.
                 */
                if (task->flags & PF_EXITING)
                        file = ERR_PTR(-ESRCH);
                else
                        file = ERR_PTR(-EBADF);
        }

        return file;
}

static int pidfd_getfd(struct pid *pid, int fd)
{
        struct task_struct *task;
        struct file *file;
        int ret;

        task = get_pid_task(pid, PIDTYPE_PID);
        if (!task)
                return -ESRCH;

        file = __pidfd_fget(task, fd);
        put_task_struct(task);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ret = receive_fd(file, NULL, O_CLOEXEC);
        fput(file);

        return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd: the pidfd file descriptor of the process
 * @fd:    the file descriptor number to get
 * @flags: flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd and the file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
                unsigned int, flags)
{
        struct pid *pid;
        struct fd f;
        int ret;

        /* flags is currently unused - make sure it's unset */
        if (flags)
                return -EINVAL;

        f = fdget(pidfd);
        if (!f.file)
                return -EBADF;

        pid = pidfd_pid(f.file);
        if (IS_ERR(pid))
                ret = PTR_ERR(pid);
        else
                ret = pidfd_getfd(pid, fd);

        fdput(f);
        return ret;
}