// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/io_uring.h>

#include "scm.h"

unsigned int unix_tot_inflight;
EXPORT_SYMBOL(unix_tot_inflight);

LIST_HEAD(gc_inflight_list);
EXPORT_SYMBOL(gc_inflight_list);

DEFINE_SPINLOCK(unix_gc_lock);
EXPORT_SYMBOL(unix_gc_lock);

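/* Return the struct sock behind @filp if it is an AF_UNIX socket, the
 * internal socket of an io_uring instance if it is one, or NULL otherwise.
 */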
struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops = READ_ONCE(sock->ops);
		struct sock *s = sock->sk;

		/* PF_UNIX ? */
		if (s && ops && ops->family == PF_UNIX)
			u_sock = s;
	} else {
		/* Could be an io_uring instance */
		u_sock = io_uring_get_socket(filp);
	}
	return u_sock;
}
EXPORT_SYMBOL(unix_get_socket);

/* Keep track of the number of times the file descriptor is in flight
 * if it is for an AF_UNIX socket.
 */
void unix_inflight(struct user_struct *user, struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	spin_lock(&unix_gc_lock);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
	}
	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
	spin_unlock(&unix_gc_lock);
}

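/* Undo unix_inflight(): drop the in-flight count taken for the file
 * descriptor if it is for an AF_UNIX socket, and remove the socket from
 * gc_inflight_list once its count reaches zero.
 */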
void unix_notinflight(struct user_struct *user, struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	spin_lock(&unix_gc_lock);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		BUG_ON(!atomic_long_read(&u->inflight));
		BUG_ON(list_empty(&u->link));

		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
	}
	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
	spin_unlock(&unix_gc_lock);
}

/*
 * The "user->unix_inflight" variable is protected by the garbage
 * collection lock, and we just read it locklessly here. If you go
 * over the limit, there might be a tiny race in actually noticing
 * it across threads. Tough.
 */
static inline bool too_many_unix_fds(struct task_struct *p)
{
	struct user_struct *user = current_user();

	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
	return false;
}

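/* Duplicate the file references carried in @scm onto @skb and mark each
 * descriptor as in flight.  Returns -ETOOMANYREFS if the sending user
 * already has too many AF_UNIX descriptors in flight, or -ENOMEM if the
 * references cannot be duplicated.
 */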
int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	if (too_many_unix_fds(current))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_inflight(scm->fp->user, scm->fp->fp[i]);
	return 0;
}
EXPORT_SYMBOL(unix_attach_fds);

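/* Move the file references from @skb back into @scm and drop the
 * in-flight count taken for each descriptor by unix_attach_fds().
 */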
void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
}
EXPORT_SYMBOL(unix_detach_fds);

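/* skb destructor for AF_UNIX: put the passed credentials and file
 * descriptors, then release the send buffer charge via sock_wfree().
 */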
void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;

	memset(&scm, 0, sizeof(scm));
	scm.pid = UNIXCB(skb).pid;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}
EXPORT_SYMBOL(unix_destruct_scm);

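/* io_uring uses the same skb destructor as AF_UNIX SCM_RIGHTS messages. */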
void io_uring_destruct_scm(struct sk_buff *skb)
{
	unix_destruct_scm(skb);
}
EXPORT_SYMBOL(io_uring_destruct_scm);