// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}
/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);

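/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver's ->uring_cmd() handler might mark a long-running command as
 * cancelable and honor a later IO_URING_F_CANCEL pass. my_dev_queue_cmd()
 * and my_dev_abort_cmd() are made-up helpers.
 *
 *	static int my_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			// Abort in-flight work; the normal completion path
 *			// still calls io_uring_cmd_done() exactly once.
 *			my_dev_abort_cmd(cmd);
 *			return 0;
 *		}
 *
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		my_dev_queue_cmd(cmd);	// completes asynchronously
 *		return -EIOCBQUEUED;
 *	}
 */
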
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
		void (*task_work_cb)(struct io_uring_cmd *, unsigned),
		unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

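/*
 * Example (illustrative sketch): a driver that finishes work in hard-IRQ
 * context typically punts the final completion to task context via the
 * io_uring_cmd_complete_in_task() wrapper around this helper. Names
 * prefixed my_ are hypothetical.
 *
 *	static void my_cmd_tw_cb(struct io_uring_cmd *cmd, unsigned int issue_flags)
 *	{
 *		// Runs in the submitter's task context.
 *		io_uring_cmd_done(cmd, my_cmd_result(cmd), 0, issue_flags);
 *	}
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct io_uring_cmd *cmd = my_cmd_from_irq(data);
 *
 *		io_uring_cmd_complete_in_task(cmd, my_cmd_tw_cb);
 *		return IRQ_HANDLED;
 *	}
 */
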
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
		u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

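/*
 * Example (illustrative sketch): a driver that returned -EIOCBQUEUED from
 * ->uring_cmd() later completes the request exactly once from its own
 * completion path. struct my_cmd_ctx and its fields are made up; res2 is
 * only surfaced to userspace on CQE32 rings.
 *
 *	static void my_dev_complete(struct my_cmd_ctx *mc)
 *	{
 *		// Called without the ring's uring_lock held.
 *		io_uring_cmd_done(mc->ioucmd, mc->status, mc->extra_result,
 *				  IO_URING_F_UNLOCKED);
 *	}
 */
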
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->sqe = sqe;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

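/*
 * Example (illustrative sketch): a driver handling a fixed-buffer command
 * (IORING_URING_CMD_FIXED) imports the registered buffer before starting
 * the transfer. The command-private SQE layout (my_sqe->addr/len) and the
 * data direction are hypothetical.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(READ_ONCE(my_sqe->addr),
 *					READ_ONCE(my_sqe->len), WRITE,
 *					&iter, ioucmd);
 *	if (ret)
 *		return ret;
 *	// 'iter' now walks the registered buffer chosen by sqe->buf_index.
 */
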
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

#if defined(CONFIG_NET)
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
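
/*
 * Example (illustrative userspace sketch, assuming liburing): querying the
 * number of unread bytes on a socket via SOCKET_URING_OP_SIOCINQ. Setup of
 * 'ring'/'sockfd' and error handling are omitted.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = sockfd;
 *	sqe->cmd_op = SOCKET_URING_OP_SIOCINQ;
 *	io_uring_submit(&ring);
 *	// On success the CQE's res holds the SIOCINQ value; on failure, -errno.
 */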