// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of the 1 million possible PIDs
 * are already allocated, costs a scan of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>

struct pid init_struct_pid = {
	.count		= REFCOUNT_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * PID-map pages start out as NULL and are allocated upon first use;
 * they are never deallocated. This way a low pid_max value does not
 * cause lots of bitmaps to be allocated, but the scheme still scales
 * up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.ns.count = REFCOUNT_INIT(2),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
	.memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
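
/*
 * Illustrative sketch of the rule above (hypothetical caller, not part
 * of this file): every pidmap_lock user must disable interrupts, so
 * that an interrupt doing read_lock(&tasklist_lock) can never arrive
 * on a CPU that already holds pidmap_lock:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&pidmap_lock, flags);
 *	... touch the namespace idr / pid_allocated counts ...
 *	spin_unlock_irqrestore(&pidmap_lock, flags);
 */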

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
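
/*
 * Illustrative sketch (hypothetical caller): references taken with
 * get_pid() must be balanced by put_pid(); dropping the last reference
 * frees the struct pid and releases its namespace reference.
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *
 *	... use pid ...
 *	put_pid(pid);
 */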

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
		      size_t set_tid_size)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	/*
	 * set_tid_size contains the size of the set_tid array. Starting at
	 * the most nested currently active PID namespace it tells alloc_pid()
	 * which PID to set for a process in that most nested PID namespace
	 * up to set_tid_size PID namespaces. It does not have to set the PID
	 * for a process in all nested PID namespaces but set_tid_size must
	 * never be greater than the current ns->level + 1.
	 */
	if (set_tid_size > ns->level + 1)
		return ERR_PTR(-EINVAL);

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int tid = 0;

		if (set_tid_size) {
			tid = set_tid[ns->level - i];

			retval = -EINVAL;
			if (tid < 1 || tid >= pid_max)
				goto out_free;
			/*
			 * Also fail if a PID != 1 is requested and
			 * no PID 1 exists.
			 */
			if (tid != 1 && !tmp->child_reaper)
				goto out_free;
			retval = -EPERM;
			if (!checkpoint_restore_ns_capable(tmp->user_ns))
				goto out_free;
			set_tid_size--;
		}

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		if (tid) {
			nr = idr_alloc(&tmp->idr, NULL, tid,
				       tid + 1, GFP_ATOMIC);
			/*
			 * If ENOSPC is returned it means that the PID is
			 * already in use. Return EEXIST in that case.
			 */
			if (nr == -ENOSPC)
				nr = -EEXIST;
		} else {
			int pid_min = 1;
			/*
			 * init really needs pid 1, but after reaching the
			 * maximum, wrap back to RESERVED_PIDS.
			 */
			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
				pid_min = RESERVED_PIDS;

			/*
			 * Store a null pointer so find_pid_ns does not find
			 * a partially initialized PID (see below).
			 */
			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
					      pid_max, GFP_ATOMIC);
		}
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	/*
	 * ENOMEM is not the most obvious choice especially for the case
	 * where the child subreaper has already exited and the pid
	 * namespace denies the creation of any new processes. But ENOMEM
	 * is what we have exposed to userspace for a long time and it is
	 * documented behavior for pid namespaces. So we can't easily
	 * change it even if there were an error code better suited.
	 */
	retval = -ENOMEM;

	get_pid_ns(ns);
	refcount_set(&pid->count, 1);
	spin_lock_init(&pid->lock);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	init_waitqueue_head(&pid->wait_pidfd);
	INIT_HLIST_HEAD(&pid->inodes);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
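
/*
 * Illustrative sketch (hypothetical caller, values arbitrary): this is
 * the path taken when a checkpoint/restore tool passes a set_tid array
 * via clone3() to request fixed PIDs. set_tid[0] names the PID in the
 * most nested namespace, set_tid[1] the PID one level up:
 *
 *	pid_t set_tid[2] = { 42, 31496 };
 *	struct pid *pid = alloc_pid(ns, set_tid, ARRAY_SIZE(set_tid));
 *
 *	if (IS_ERR(pid))
 *		return PTR_ERR(pid);	// e.g. -EEXIST if a PID is taken
 */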

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
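
/*
 * Illustrative sketch (hypothetical caller): neither find_pid_ns() nor
 * find_vpid() takes a reference, so the result is only stable under
 * rcu_read_lock(); pin it with get_pid() before the read side ends
 * (find_get_pid() below is the canonical form of this pattern):
 *
 *	rcu_read_lock();
 *	pid = get_pid(find_vpid(nr));
 *	rcu_read_unlock();
 */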

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid **pid_ptr = task_pid_ptr(task, type);
	struct pid *pid;
	int tmp;

	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pid_has_task(pid, tmp))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

void exchange_tids(struct task_struct *left, struct task_struct *right)
{
	struct pid *pid1 = left->thread_pid;
	struct pid *pid2 = right->thread_pid;
	struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
	struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

	/* Swap the single entry tid lists */
	hlists_swap_heads_rcu(head1, head2);

	/* Swap the per task_struct pid */
	rcu_assign_pointer(left->thread_pid, pid2);
	rcu_assign_pointer(right->thread_pid, pid1);

	/* Swap the cached value */
	WRITE_ONCE(left->pid, pid_nr(pid2));
	WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
		  enum pid_type type)
{
	if (type == PIDTYPE_PID)
		new->thread_pid = old->thread_pid;
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
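
/*
 * Illustrative example (numbers arbitrary): a struct pid carries one
 * number per namespace level, so the same task can be PID 42 inside a
 * child namespace and PID 1137 in the init namespace, while a sibling
 * namespace the pid does not belong to sees 0:
 *
 *	pid_t in_child = pid_nr_ns(pid, child_ns);	// e.g. 42
 *	pid_t in_init = pid_nr_ns(pid, &init_pid_ns);	// e.g. 1137
 *	pid_t hidden = pid_nr_ns(pid, sibling_ns);	// 0: not visible
 */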

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr, this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
EXPORT_SYMBOL_GPL(find_ge_pid);
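
/*
 * Illustrative sketch (hypothetical loop, simplified from the way
 * fs/proc walks a namespace; must run under rcu_read_lock()):
 *
 *	pid = find_ge_pid(nr, ns);
 *	while (pid) {
 *		nr = pid_nr_ns(pid, ns);
 *		... emit one /proc entry for nr ...
 *		pid = find_ge_pid(nr + 1, ns);
 *	}
 */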

struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
	struct fd f;
	struct pid *pid;

	f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(f.file);
	if (!IS_ERR(pid)) {
		get_pid(pid);
		*flags = f.file->f_flags;
	}

	fdput(f);
	return pid;
}

/**
 * pidfd_get_task() - Get the task associated with a pidfd
 *
 * @pidfd: pidfd for which to get the task
 * @flags: flags associated with this pidfd
 *
 * Return the task associated with @pidfd. The function takes a reference on
 * the returned task. The caller is responsible for releasing that reference.
 *
 * Currently, the process identified by @pidfd is always a thread-group leader.
 * This restriction currently exists for all aspects of pidfds including pidfd
 * creation (CLONE_PIDFD cannot be used with CLONE_THREAD) and pidfd polling
 * (only supports thread group leaders).
 *
 * Return: On success, the task_struct associated with the pidfd.
 *	   On error, a negative errno number will be returned.
 */
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
{
	unsigned int f_flags;
	struct pid *pid;
	struct task_struct *task;

	pid = pidfd_get_pid(pidfd, &f_flags);
	if (IS_ERR(pid))
		return ERR_CAST(pid);

	task = get_pid_task(pid, PIDTYPE_TGID);
	put_pid(pid);
	if (!task)
		return ERR_PTR(-ESRCH);

	*flags = f_flags;
	return task;
}
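
/*
 * Illustrative sketch (hypothetical syscall body): process-directed
 * syscalls that take a pidfd argument typically resolve it like this
 * and drop the task reference when done:
 *
 *	unsigned int f_flags;
 *	struct task_struct *task = pidfd_get_task(pidfd, &f_flags);
 *
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	... operate on task ...
 *	put_task_struct(task);
 */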

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:   struct pid that the pidfd will reference
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * This symbol should not be explicitly exported to loadable modules.
 *
 * Return: On success, a cloexec pidfd is returned.
 *	   On error, a negative errno number will be returned.
 */
int pidfd_create(struct pid *pid, unsigned int flags)
{
	int pidfd;
	struct file *pidfd_file;

	pidfd = pidfd_prepare(pid, flags, &pidfd_file);
	if (pidfd < 0)
		return pidfd;

	fd_install(pidfd, pidfd_file);
	return pidfd;
}

/**
 * sys_pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *	   On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd;
	struct pid *p;

	if (flags & ~PIDFD_NONBLOCK)
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	fd = pidfd_create(p, flags);

	put_pid(p);
	return fd;
}
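
/*
 * Illustrative userspace sketch (raw syscall, for a libc without a
 * pidfd_open() wrapper): a pidfd becomes readable when the process
 * exits, which makes it a race-free wait primitive:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns once the process has exited
 *	close(pidfd);
 */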

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = kmem_cache_create("pid",
			struct_size_t(struct pid, numbers, 1),
			__alignof__(struct pid),
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
			NULL);
}

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
	struct file *file;
	int ret;

	ret = down_read_killable(&task->signal->exec_update_lock);
	if (ret)
		return ERR_PTR(ret);

	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
		file = fget_task(task, fd);
	else
		file = ERR_PTR(-EPERM);

	up_read(&task->signal->exec_update_lock);

	return file ?: ERR_PTR(-EBADF);
}

static int pidfd_getfd(struct pid *pid, int fd)
{
	struct task_struct *task;
	struct file *file;
	int ret;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	file = __pidfd_fget(task, fd);
	put_task_struct(task);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = receive_fd(file, O_CLOEXEC);
	fput(file);

	return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd: the pidfd file descriptor of the process
 * @fd:    the file descriptor number to get
 * @flags: flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd, and file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *	   On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
		unsigned int, flags)
{
	struct pid *pid;
	struct fd f;
	int ret;

	/* flags is currently unused - make sure it's unset */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	pid = pidfd_pid(f.file);
	if (IS_ERR(pid))
		ret = PTR_ERR(pid);
	else
		ret = pidfd_getfd(pid, fd);

	fdput(f);
	return ret;
}
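
/*
 * Illustrative userspace sketch (raw syscall; pidfd and targetfd are
 * placeholders the caller already holds): the result is a close-on-exec
 * duplicate of the target process's descriptor, much as if it had been
 * passed over a unix socket with SCM_RIGHTS:
 *
 *	int localfd = syscall(SYS_pidfd_getfd, pidfd, targetfd, 0);
 *
 *	if (localfd < 0)
 *		perror("pidfd_getfd");
 */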