1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* XDP sockets |
3 | * |
4 |  * AF_XDP sockets allow a channel between XDP programs and userspace |
5 | * applications. |
6 | * Copyright(c) 2018 Intel Corporation. |
7 | * |
8 | * Author(s): Björn Töpel <bjorn.topel@intel.com> |
9 | * Magnus Karlsson <magnus.karlsson@intel.com> |
10 | */ |
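/* Hedged usage sketch (not part of the original sources): a minimal
 * userspace setup sequence for an AF_XDP socket, assuming a registered
 * memory area `umem_area` of `umem_len` bytes and the uapi definitions
 * from <linux/if_xdp.h>; error handling and ring mmap details omitted:
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,
 *		.len = umem_len,
 *		.chunk_size = 2048,
 *	};
 *	int sz = 2048;
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &sz, sizeof(sz));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &sz, sizeof(sz));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &sz, sizeof(sz));
 *	// mmap() the rings at XDP_PGOFF_RX_RING etc., then bind:
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue_id,
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */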
11 | |
12 | #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__ |
13 | |
14 | #include <linux/if_xdp.h> |
15 | #include <linux/init.h> |
16 | #include <linux/sched/mm.h> |
17 | #include <linux/sched/signal.h> |
18 | #include <linux/sched/task.h> |
19 | #include <linux/socket.h> |
20 | #include <linux/file.h> |
21 | #include <linux/uaccess.h> |
22 | #include <linux/net.h> |
23 | #include <linux/netdevice.h> |
24 | #include <linux/rculist.h> |
25 | #include <linux/vmalloc.h> |
26 | #include <net/xdp_sock_drv.h> |
27 | #include <net/busy_poll.h> |
28 | #include <net/netdev_rx_queue.h> |
29 | #include <net/xdp.h> |
30 | |
31 | #include "xsk_queue.h" |
32 | #include "xdp_umem.h" |
33 | #include "xsk.h" |
34 | |
35 | #define TX_BATCH_SIZE 32 |
36 | #define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE) |
37 | |
38 | static DEFINE_PER_CPU(struct list_head, xskmap_flush_list); |
39 | |
40 | void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool) |
41 | { |
42 | if (pool->cached_need_wakeup & XDP_WAKEUP_RX) |
43 | return; |
44 | |
45 | pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP; |
46 | pool->cached_need_wakeup |= XDP_WAKEUP_RX; |
47 | } |
48 | EXPORT_SYMBOL(xsk_set_rx_need_wakeup); |
49 | |
50 | void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool) |
51 | { |
52 | struct xdp_sock *xs; |
53 | |
54 | if (pool->cached_need_wakeup & XDP_WAKEUP_TX) |
55 | return; |
56 | |
57 | rcu_read_lock(); |
58 | list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { |
59 | xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; |
60 | } |
61 | rcu_read_unlock(); |
62 | |
63 | pool->cached_need_wakeup |= XDP_WAKEUP_TX; |
64 | } |
65 | EXPORT_SYMBOL(xsk_set_tx_need_wakeup); |
66 | |
67 | void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool) |
68 | { |
69 | if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX)) |
70 | return; |
71 | |
72 | pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP; |
73 | pool->cached_need_wakeup &= ~XDP_WAKEUP_RX; |
74 | } |
75 | EXPORT_SYMBOL(xsk_clear_rx_need_wakeup); |
76 | |
77 | void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool) |
78 | { |
79 | struct xdp_sock *xs; |
80 | |
81 | if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX)) |
82 | return; |
83 | |
84 | rcu_read_lock(); |
85 | list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { |
86 | xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP; |
87 | } |
88 | rcu_read_unlock(); |
89 | |
90 | pool->cached_need_wakeup &= ~XDP_WAKEUP_TX; |
91 | } |
92 | EXPORT_SYMBOL(xsk_clear_tx_need_wakeup); |
93 | |
94 | bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool) |
95 | { |
96 | return pool->uses_need_wakeup; |
97 | } |
98 | EXPORT_SYMBOL(xsk_uses_need_wakeup); |
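/* Hedged driver-side sketch of how the need_wakeup helpers above are meant
 * to be used at the end of a zero-copy driver's NAPI poll; names such as
 * `failed_to_refill_fq` are illustrative, not from any specific driver:
 *
 *	if (xsk_uses_need_wakeup(pool)) {
 *		if (failed_to_refill_fq)
 *			xsk_set_rx_need_wakeup(pool);
 *		else
 *			xsk_clear_rx_need_wakeup(pool);
 *	}
 *
 * Userspace then checks XDP_RING_NEED_WAKEUP on the fill ring and only
 * issues a wakeup syscall (poll()/recvmsg()) when the flag is set.
 */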
99 | |
100 | struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev, |
101 | u16 queue_id) |
102 | { |
103 | if (queue_id < dev->real_num_rx_queues) |
104 | return dev->_rx[queue_id].pool; |
105 | if (queue_id < dev->real_num_tx_queues) |
106 | return dev->_tx[queue_id].pool; |
107 | |
108 | return NULL; |
109 | } |
110 | EXPORT_SYMBOL(xsk_get_pool_from_qid); |
111 | |
112 | void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id) |
113 | { |
114 | if (queue_id < dev->num_rx_queues) |
115 | dev->_rx[queue_id].pool = NULL; |
116 | if (queue_id < dev->num_tx_queues) |
117 | dev->_tx[queue_id].pool = NULL; |
118 | } |
119 | |
120 | /* The buffer pool is stored both in the _rx struct and the _tx struct as we do |
121 | * not know if the device has more tx queues than rx, or the opposite. |
122 | * This might also change during run time. |
123 | */ |
124 | int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool, |
125 | u16 queue_id) |
126 | { |
127 | if (queue_id >= max_t(unsigned int, |
128 | dev->real_num_rx_queues, |
129 | dev->real_num_tx_queues)) |
130 | return -EINVAL; |
131 | |
132 | if (queue_id < dev->real_num_rx_queues) |
133 | dev->_rx[queue_id].pool = pool; |
134 | if (queue_id < dev->real_num_tx_queues) |
135 | dev->_tx[queue_id].pool = pool; |
136 | |
137 | return 0; |
138 | } |
139 | |
140 | static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len, |
141 | u32 flags) |
142 | { |
143 | u64 addr; |
144 | int err; |
145 | |
146 | addr = xp_get_handle(xskb); |
147 | 	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags); |
148 | if (err) { |
149 | xs->rx_queue_full++; |
150 | return err; |
151 | } |
152 | |
153 | xp_release(xskb); |
154 | return 0; |
155 | } |
156 | |
157 | static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) |
158 | { |
159 | struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); |
160 | u32 frags = xdp_buff_has_frags(xdp); |
161 | struct xdp_buff_xsk *pos, *tmp; |
162 | struct list_head *xskb_list; |
163 | u32 contd = 0; |
164 | int err; |
165 | |
166 | if (frags) |
167 | contd = XDP_PKT_CONTD; |
168 | |
169 | 	err = __xsk_rcv_zc(xs, xskb, len, contd); |
170 | if (err || likely(!frags)) |
171 | goto out; |
172 | |
173 | xskb_list = &xskb->pool->xskb_list; |
174 | list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) { |
175 | 		if (list_is_singular(xskb_list)) |
176 | 			contd = 0; |
177 | 		len = pos->xdp.data_end - pos->xdp.data; |
178 | 		err = __xsk_rcv_zc(xs, pos, len, contd); |
179 | 		if (err) |
180 | 			return err; |
181 | 		list_del(&pos->xskb_list_node); |
182 | } |
183 | |
184 | out: |
185 | return err; |
186 | } |
187 | |
188 | static void *xsk_copy_xdp_start(struct xdp_buff *from) |
189 | { |
190 | if (unlikely(xdp_data_meta_unsupported(from))) |
191 | return from->data; |
192 | else |
193 | return from->data_meta; |
194 | } |
195 | |
196 | static u32 xsk_copy_xdp(void *to, void **from, u32 to_len, |
197 | u32 *from_len, skb_frag_t **frag, u32 rem) |
198 | { |
199 | u32 copied = 0; |
200 | |
201 | while (1) { |
202 | u32 copy_len = min_t(u32, *from_len, to_len); |
203 | |
204 | memcpy(to, *from, copy_len); |
205 | copied += copy_len; |
206 | if (rem == copied) |
207 | return copied; |
208 | |
209 | if (*from_len == copy_len) { |
210 | 			*from = skb_frag_address(*frag); |
211 | 			*from_len = skb_frag_size((*frag)++); |
212 | } else { |
213 | *from += copy_len; |
214 | *from_len -= copy_len; |
215 | } |
216 | if (to_len == copy_len) |
217 | return copied; |
218 | |
219 | to_len -= copy_len; |
220 | to += copy_len; |
221 | } |
222 | } |
223 | |
224 | static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) |
225 | { |
226 | 	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool); |
227 | 	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to; |
228 | u32 from_len, meta_len, rem, num_desc; |
229 | struct xdp_buff_xsk *xskb; |
230 | struct xdp_buff *xsk_xdp; |
231 | skb_frag_t *frag; |
232 | |
233 | from_len = xdp->data_end - copy_from; |
234 | meta_len = xdp->data - copy_from; |
235 | rem = len + meta_len; |
236 | |
237 | if (len <= frame_size && !xdp_buff_has_frags(xdp)) { |
238 | int err; |
239 | |
240 | 		xsk_xdp = xsk_buff_alloc(xs->pool); |
241 | if (!xsk_xdp) { |
242 | xs->rx_dropped++; |
243 | return -ENOMEM; |
244 | } |
245 | memcpy(xsk_xdp->data - meta_len, copy_from, rem); |
246 | xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp); |
247 | 		err = __xsk_rcv_zc(xs, xskb, len, 0); |
248 | if (err) { |
249 | 			xsk_buff_free(xsk_xdp); |
250 | return err; |
251 | } |
252 | |
253 | return 0; |
254 | } |
255 | |
256 | num_desc = (len - 1) / frame_size + 1; |
257 | |
258 | 	if (!xsk_buff_can_alloc(xs->pool, num_desc)) { |
259 | xs->rx_dropped++; |
260 | return -ENOMEM; |
261 | } |
262 | 	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) { |
263 | xs->rx_queue_full++; |
264 | return -ENOBUFS; |
265 | } |
266 | |
267 | if (xdp_buff_has_frags(xdp)) { |
268 | struct skb_shared_info *sinfo; |
269 | |
270 | sinfo = xdp_get_shared_info_from_buff(xdp); |
271 | frag = &sinfo->frags[0]; |
272 | } |
273 | |
274 | do { |
275 | u32 to_len = frame_size + meta_len; |
276 | u32 copied; |
277 | |
278 | 		xsk_xdp = xsk_buff_alloc(xs->pool); |
279 | copy_to = xsk_xdp->data - meta_len; |
280 | |
281 | 		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem); |
282 | rem -= copied; |
283 | |
284 | xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp); |
285 | 		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0); |
286 | meta_len = 0; |
287 | } while (rem); |
288 | |
289 | return 0; |
290 | } |
291 | |
292 | static bool xsk_tx_writeable(struct xdp_sock *xs) |
293 | { |
294 | 	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2) |
295 | return false; |
296 | |
297 | return true; |
298 | } |
299 | |
300 | static bool xsk_is_bound(struct xdp_sock *xs) |
301 | { |
302 | if (READ_ONCE(xs->state) == XSK_BOUND) { |
303 | /* Matches smp_wmb() in bind(). */ |
304 | smp_rmb(); |
305 | return true; |
306 | } |
307 | return false; |
308 | } |
309 | |
310 | static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) |
311 | { |
312 | if (!xsk_is_bound(xs)) |
313 | return -ENXIO; |
314 | |
315 | if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) |
316 | return -EINVAL; |
317 | |
318 | 	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) { |
319 | xs->rx_dropped++; |
320 | return -ENOSPC; |
321 | } |
322 | |
323 | 	sk_mark_napi_id_once_xdp(&xs->sk, xdp); |
324 | return 0; |
325 | } |
326 | |
327 | static void xsk_flush(struct xdp_sock *xs) |
328 | { |
329 | 	xskq_prod_submit(xs->rx); |
330 | 	__xskq_cons_release(xs->pool->fq); |
331 | 	sock_def_readable(&xs->sk); |
332 | } |
333 | |
334 | int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) |
335 | { |
336 | u32 len = xdp_get_buff_len(xdp); |
337 | int err; |
338 | |
339 | 	spin_lock_bh(&xs->rx_lock); |
340 | err = xsk_rcv_check(xs, xdp, len); |
341 | if (!err) { |
342 | err = __xsk_rcv(xs, xdp, len); |
343 | xsk_flush(xs); |
344 | } |
345 | 	spin_unlock_bh(&xs->rx_lock); |
346 | return err; |
347 | } |
348 | |
349 | static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) |
350 | { |
351 | u32 len = xdp_get_buff_len(xdp); |
352 | int err; |
353 | |
354 | err = xsk_rcv_check(xs, xdp, len); |
355 | if (err) |
356 | return err; |
357 | |
358 | if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) { |
359 | len = xdp->data_end - xdp->data; |
360 | return xsk_rcv_zc(xs, xdp, len); |
361 | } |
362 | |
363 | err = __xsk_rcv(xs, xdp, len); |
364 | if (!err) |
365 | xdp_return_buff(xdp); |
366 | return err; |
367 | } |
368 | |
369 | int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp) |
370 | { |
371 | struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list); |
372 | int err; |
373 | |
374 | err = xsk_rcv(xs, xdp); |
375 | if (err) |
376 | return err; |
377 | |
378 | if (!xs->flush_node.prev) |
379 | 		list_add(&xs->flush_node, flush_list); |
380 | |
381 | return 0; |
382 | } |
383 | |
384 | void __xsk_map_flush(void) |
385 | { |
386 | struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list); |
387 | struct xdp_sock *xs, *tmp; |
388 | |
389 | list_for_each_entry_safe(xs, tmp, flush_list, flush_node) { |
390 | xsk_flush(xs); |
391 | 		__list_del_clearprev(&xs->flush_node); |
392 | } |
393 | } |
394 | |
395 | #ifdef CONFIG_DEBUG_NET |
396 | bool xsk_map_check_flush(void) |
397 | { |
398 | if (list_empty(this_cpu_ptr(&xskmap_flush_list))) |
399 | return false; |
400 | __xsk_map_flush(); |
401 | return true; |
402 | } |
403 | #endif |
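/* Hedged BPF-side sketch: the redirect/flush path above is entered when an
 * XDP program redirects a frame into a BPF_MAP_TYPE_XSKMAP. Assuming a map
 * named `xsks_map` keyed by rx queue index, a minimal program could be:
 *
 *	SEC("xdp")
 *	int xsk_redirect(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
 *	}
 *
 * xsk_flush() then runs for every socket on the per-cpu flush list when the
 * driver flushes the redirect maps at the end of its NAPI poll.
 */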
404 | |
405 | void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries) |
406 | { |
407 | 	xskq_prod_submit_n(pool->cq, nb_entries); |
408 | } |
409 | EXPORT_SYMBOL(xsk_tx_completed); |
410 | |
411 | void xsk_tx_release(struct xsk_buff_pool *pool) |
412 | { |
413 | struct xdp_sock *xs; |
414 | |
415 | rcu_read_lock(); |
416 | list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { |
417 | 		__xskq_cons_release(xs->tx); |
418 | if (xsk_tx_writeable(xs)) |
419 | xs->sk.sk_write_space(&xs->sk); |
420 | } |
421 | rcu_read_unlock(); |
422 | } |
423 | EXPORT_SYMBOL(xsk_tx_release); |
424 | |
425 | bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) |
426 | { |
427 | bool budget_exhausted = false; |
428 | struct xdp_sock *xs; |
429 | |
430 | rcu_read_lock(); |
431 | again: |
432 | list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { |
433 | if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) { |
434 | budget_exhausted = true; |
435 | continue; |
436 | } |
437 | |
438 | 		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) { |
439 | 			if (xskq_has_descs(xs->tx)) |
440 | 				xskq_cons_release(xs->tx); |
441 | continue; |
442 | } |
443 | |
444 | xs->tx_budget_spent++; |
445 | |
446 | /* This is the backpressure mechanism for the Tx path. |
447 | * Reserve space in the completion queue and only proceed |
448 | * if there is space in it. This avoids having to implement |
449 | * any buffering in the Tx path. |
450 | */ |
451 | 		if (xskq_prod_reserve_addr(pool->cq, desc->addr)) |
452 | goto out; |
453 | |
454 | 		xskq_cons_release(xs->tx); |
455 | rcu_read_unlock(); |
456 | return true; |
457 | } |
458 | |
459 | if (budget_exhausted) { |
460 | list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) |
461 | xs->tx_budget_spent = 0; |
462 | |
463 | budget_exhausted = false; |
464 | goto again; |
465 | } |
466 | |
467 | out: |
468 | rcu_read_unlock(); |
469 | return false; |
470 | } |
471 | EXPORT_SYMBOL(xsk_tx_peek_desc); |
472 | |
473 | static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries) |
474 | { |
475 | struct xdp_desc *descs = pool->tx_descs; |
476 | u32 nb_pkts = 0; |
477 | |
478 | while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts])) |
479 | nb_pkts++; |
480 | |
481 | xsk_tx_release(pool); |
482 | return nb_pkts; |
483 | } |
484 | |
485 | u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts) |
486 | { |
487 | struct xdp_sock *xs; |
488 | |
489 | rcu_read_lock(); |
490 | 	if (!list_is_singular(&pool->xsk_tx_list)) { |
491 | 		/* Fallback to the non-batched version */ |
492 | 		rcu_read_unlock(); |
493 | 		return xsk_tx_peek_release_fallback(pool, nb_pkts); |
494 | } |
495 | |
496 | xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list); |
497 | if (!xs) { |
498 | nb_pkts = 0; |
499 | goto out; |
500 | } |
501 | |
502 | 	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts); |
503 | |
504 | /* This is the backpressure mechanism for the Tx path. Try to |
505 | * reserve space in the completion queue for all packets, but |
506 | * if there are fewer slots available, just process that many |
507 | * packets. This avoids having to implement any buffering in |
508 | * the Tx path. |
509 | */ |
510 | 	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts); |
511 | if (!nb_pkts) |
512 | goto out; |
513 | |
514 | 	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts); |
515 | if (!nb_pkts) { |
516 | xs->tx->queue_empty_descs++; |
517 | goto out; |
518 | } |
519 | |
520 | 	__xskq_cons_release(xs->tx); |
521 | 	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts); |
522 | xs->sk.sk_write_space(&xs->sk); |
523 | |
524 | out: |
525 | rcu_read_unlock(); |
526 | return nb_pkts; |
527 | } |
528 | EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch); |
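/* Hedged driver-side sketch of the zero-copy Tx pairing for the helpers
 * above: descriptors are peeked in a batch, posted to the hardware ring and
 * completed back to the pool later; `budget`, `post_to_hw()` and `completed`
 * are illustrative placeholders, not part of this file:
 *
 *	nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
 *	for (i = 0; i < nb_pkts; i++) {
 *		dma = xsk_buff_raw_get_dma(pool, pool->tx_descs[i].addr);
 *		post_to_hw(dma, pool->tx_descs[i].len);
 *	}
 *	...
 *	// from the Tx clean-up path, once the hardware is done:
 *	xsk_tx_completed(pool, completed);
 */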
529 | |
530 | static int xsk_wakeup(struct xdp_sock *xs, u8 flags) |
531 | { |
532 | struct net_device *dev = xs->dev; |
533 | |
534 | return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags); |
535 | } |
536 | |
537 | static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr) |
538 | { |
539 | unsigned long flags; |
540 | int ret; |
541 | |
542 | spin_lock_irqsave(&xs->pool->cq_lock, flags); |
543 | 	ret = xskq_prod_reserve_addr(xs->pool->cq, addr); |
544 | 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags); |
545 | |
546 | return ret; |
547 | } |
548 | |
549 | static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n) |
550 | { |
551 | unsigned long flags; |
552 | |
553 | spin_lock_irqsave(&xs->pool->cq_lock, flags); |
554 | 	xskq_prod_submit_n(xs->pool->cq, n); |
555 | 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags); |
556 | } |
557 | |
558 | static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n) |
559 | { |
560 | unsigned long flags; |
561 | |
562 | spin_lock_irqsave(&xs->pool->cq_lock, flags); |
563 | 	xskq_prod_cancel_n(xs->pool->cq, n); |
564 | 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags); |
565 | } |
566 | |
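/* The copy-mode Tx path stores the number of descriptors consumed by an skb
 * in skb_shinfo(skb)->destructor_arg, so that the destructor can submit the
 * matching number of completion queue entries when the skb is freed.
 */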
567 | static u32 xsk_get_num_desc(struct sk_buff *skb) |
568 | { |
569 | return skb ? (long)skb_shinfo(skb)->destructor_arg : 0; |
570 | } |
571 | |
572 | static void xsk_destruct_skb(struct sk_buff *skb) |
573 | { |
574 | 	xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb)); |
575 | sock_wfree(skb); |
576 | } |
577 | |
578 | static void xsk_set_destructor_arg(struct sk_buff *skb) |
579 | { |
580 | 	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1; |
581 | |
582 | skb_shinfo(skb)->destructor_arg = (void *)num; |
583 | } |
584 | |
585 | static void xsk_consume_skb(struct sk_buff *skb) |
586 | { |
587 | 	struct xdp_sock *xs = xdp_sk(skb->sk); |
588 | |
589 | skb->destructor = sock_wfree; |
590 | 	xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb)); |
591 | /* Free skb without triggering the perf drop trace */ |
592 | consume_skb(skb); |
593 | xs->skb = NULL; |
594 | } |
595 | |
596 | static void xsk_drop_skb(struct sk_buff *skb) |
597 | { |
598 | 	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb); |
599 | xsk_consume_skb(skb); |
600 | } |
601 | |
602 | static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs, |
603 | struct xdp_desc *desc) |
604 | { |
605 | struct xsk_buff_pool *pool = xs->pool; |
606 | u32 hr, len, ts, offset, copy, copied; |
607 | struct sk_buff *skb = xs->skb; |
608 | struct page *page; |
609 | void *buffer; |
610 | int err, i; |
611 | u64 addr; |
612 | |
613 | if (!skb) { |
614 | hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom)); |
615 | |
616 | 		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err); |
617 | 		if (unlikely(!skb)) |
618 | 			return ERR_PTR(err); |
619 | |
620 | 		skb_reserve(skb, hr); |
621 | } |
622 | |
623 | addr = desc->addr; |
624 | len = desc->len; |
625 | ts = pool->unaligned ? len : pool->chunk_size; |
626 | |
627 | buffer = xsk_buff_raw_get_data(pool, addr); |
628 | offset = offset_in_page(buffer); |
629 | addr = buffer - pool->addrs; |
630 | |
631 | for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) { |
632 | if (unlikely(i >= MAX_SKB_FRAGS)) |
633 | 			return ERR_PTR(-EOVERFLOW); |
634 | |
635 | page = pool->umem->pgs[addr >> PAGE_SHIFT]; |
636 | get_page(page); |
637 | |
638 | copy = min_t(u32, PAGE_SIZE - offset, len - copied); |
639 | 		skb_fill_page_desc(skb, i, page, offset, copy); |
640 | |
641 | copied += copy; |
642 | addr += copy; |
643 | offset = 0; |
644 | } |
645 | |
646 | skb->len += len; |
647 | skb->data_len += len; |
648 | skb->truesize += ts; |
649 | |
650 | 	refcount_add(ts, &xs->sk.sk_wmem_alloc); |
651 | |
652 | return skb; |
653 | } |
654 | |
655 | static struct sk_buff *xsk_build_skb(struct xdp_sock *xs, |
656 | struct xdp_desc *desc) |
657 | { |
658 | struct net_device *dev = xs->dev; |
659 | struct sk_buff *skb = xs->skb; |
660 | int err; |
661 | |
662 | if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) { |
663 | skb = xsk_build_skb_zerocopy(xs, desc); |
664 | 		if (IS_ERR(skb)) { |
665 | 			err = PTR_ERR(skb); |
666 | goto free_err; |
667 | } |
668 | } else { |
669 | u32 hr, tr, len; |
670 | void *buffer; |
671 | |
672 | 		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr); |
673 | len = desc->len; |
674 | |
675 | if (!skb) { |
676 | hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom)); |
677 | tr = dev->needed_tailroom; |
678 | 			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err); |
679 | if (unlikely(!skb)) |
680 | goto free_err; |
681 | |
682 | 			skb_reserve(skb, hr); |
683 | skb_put(skb, len); |
684 | |
685 | 			err = skb_store_bits(skb, 0, buffer, len); |
686 | if (unlikely(err)) { |
687 | kfree_skb(skb); |
688 | goto free_err; |
689 | } |
690 | } else { |
691 | int nr_frags = skb_shinfo(skb)->nr_frags; |
692 | struct page *page; |
693 | u8 *vaddr; |
694 | |
695 | if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) { |
696 | err = -EOVERFLOW; |
697 | goto free_err; |
698 | } |
699 | |
700 | page = alloc_page(xs->sk.sk_allocation); |
701 | if (unlikely(!page)) { |
702 | err = -EAGAIN; |
703 | goto free_err; |
704 | } |
705 | |
706 | vaddr = kmap_local_page(page); |
707 | memcpy(vaddr, buffer, len); |
708 | kunmap_local(vaddr); |
709 | |
710 | 			skb_add_rx_frag(skb, nr_frags, page, 0, len, 0); |
711 | } |
712 | } |
713 | |
714 | skb->dev = dev; |
715 | skb->priority = READ_ONCE(xs->sk.sk_priority); |
716 | skb->mark = READ_ONCE(xs->sk.sk_mark); |
717 | skb->destructor = xsk_destruct_skb; |
718 | xsk_set_destructor_arg(skb); |
719 | |
720 | return skb; |
721 | |
722 | free_err: |
723 | if (err == -EOVERFLOW) { |
724 | /* Drop the packet */ |
725 | 		xsk_set_destructor_arg(xs->skb); |
726 | 		xsk_drop_skb(xs->skb); |
727 | 		xskq_cons_release(xs->tx); |
728 | 	} else { |
729 | 		/* Let application retry */ |
730 | 		xsk_cq_cancel_locked(xs, 1); |
731 | } |
732 | |
733 | 	return ERR_PTR(err); |
734 | } |
735 | |
736 | static int __xsk_generic_xmit(struct sock *sk) |
737 | { |
738 | struct xdp_sock *xs = xdp_sk(sk); |
739 | u32 max_batch = TX_BATCH_SIZE; |
740 | bool sent_frame = false; |
741 | struct xdp_desc desc; |
742 | struct sk_buff *skb; |
743 | int err = 0; |
744 | |
745 | mutex_lock(&xs->mutex); |
746 | |
747 | /* Since we dropped the RCU read lock, the socket state might have changed. */ |
748 | if (unlikely(!xsk_is_bound(xs))) { |
749 | err = -ENXIO; |
750 | goto out; |
751 | } |
752 | |
753 | if (xs->queue_id >= xs->dev->real_num_tx_queues) |
754 | goto out; |
755 | |
756 | 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) { |
757 | if (max_batch-- == 0) { |
758 | err = -EAGAIN; |
759 | goto out; |
760 | } |
761 | |
762 | /* This is the backpressure mechanism for the Tx path. |
763 | * Reserve space in the completion queue and only proceed |
764 | * if there is space in it. This avoids having to implement |
765 | * any buffering in the Tx path. |
766 | */ |
767 | 		if (xsk_cq_reserve_addr_locked(xs, desc.addr)) |
768 | goto out; |
769 | |
770 | 		skb = xsk_build_skb(xs, &desc); |
771 | 		if (IS_ERR(skb)) { |
772 | 			err = PTR_ERR(skb); |
773 | if (err != -EOVERFLOW) |
774 | goto out; |
775 | err = 0; |
776 | continue; |
777 | } |
778 | |
779 | 		xskq_cons_release(xs->tx); |
780 | |
781 | 		if (xp_mb_desc(&desc)) { |
782 | xs->skb = skb; |
783 | continue; |
784 | } |
785 | |
786 | 		err = __dev_direct_xmit(skb, xs->queue_id); |
787 | 		if (err == NETDEV_TX_BUSY) { |
788 | 			/* Tell user-space to retry the send */ |
789 | 			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb)); |
790 | xsk_consume_skb(skb); |
791 | err = -EAGAIN; |
792 | goto out; |
793 | } |
794 | |
795 | /* Ignore NET_XMIT_CN as packet might have been sent */ |
796 | if (err == NET_XMIT_DROP) { |
797 | /* SKB completed but not sent */ |
798 | err = -EBUSY; |
799 | xs->skb = NULL; |
800 | goto out; |
801 | } |
802 | |
803 | sent_frame = true; |
804 | xs->skb = NULL; |
805 | } |
806 | |
807 | 	if (xskq_has_descs(xs->tx)) { |
808 | 		if (xs->skb) |
809 | 			xsk_drop_skb(xs->skb); |
810 | 		xskq_cons_release(xs->tx); |
811 | } |
812 | |
813 | out: |
814 | if (sent_frame) |
815 | if (xsk_tx_writeable(xs)) |
816 | sk->sk_write_space(sk); |
817 | |
818 | 	mutex_unlock(&xs->mutex); |
819 | return err; |
820 | } |
821 | |
822 | static int xsk_generic_xmit(struct sock *sk) |
823 | { |
824 | int ret; |
825 | |
826 | /* Drop the RCU lock since the SKB path might sleep. */ |
827 | rcu_read_unlock(); |
828 | ret = __xsk_generic_xmit(sk); |
829 | 	/* Reacquire RCU lock before going into common code. */ |
830 | rcu_read_lock(); |
831 | |
832 | return ret; |
833 | } |
834 | |
835 | static bool xsk_no_wakeup(struct sock *sk) |
836 | { |
837 | #ifdef CONFIG_NET_RX_BUSY_POLL |
838 | /* Prefer busy-polling, skip the wakeup. */ |
839 | return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) && |
840 | READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID; |
841 | #else |
842 | return false; |
843 | #endif |
844 | } |
845 | |
846 | static int xsk_check_common(struct xdp_sock *xs) |
847 | { |
848 | if (unlikely(!xsk_is_bound(xs))) |
849 | return -ENXIO; |
850 | if (unlikely(!(xs->dev->flags & IFF_UP))) |
851 | return -ENETDOWN; |
852 | |
853 | return 0; |
854 | } |
855 | |
856 | static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) |
857 | { |
858 | bool need_wait = !(m->msg_flags & MSG_DONTWAIT); |
859 | struct sock *sk = sock->sk; |
860 | struct xdp_sock *xs = xdp_sk(sk); |
861 | struct xsk_buff_pool *pool; |
862 | int err; |
863 | |
864 | err = xsk_check_common(xs); |
865 | if (err) |
866 | return err; |
867 | if (unlikely(need_wait)) |
868 | return -EOPNOTSUPP; |
869 | if (unlikely(!xs->tx)) |
870 | return -ENOBUFS; |
871 | |
872 | if (sk_can_busy_loop(sk)) { |
873 | if (xs->zc) |
874 | 			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool)); |
875 | 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */ |
876 | } |
877 | |
878 | if (xs->zc && xsk_no_wakeup(sk)) |
879 | return 0; |
880 | |
881 | pool = xs->pool; |
882 | if (pool->cached_need_wakeup & XDP_WAKEUP_TX) { |
883 | if (xs->zc) |
884 | return xsk_wakeup(xs, XDP_WAKEUP_TX); |
885 | return xsk_generic_xmit(sk); |
886 | } |
887 | return 0; |
888 | } |
889 | |
890 | static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) |
891 | { |
892 | int ret; |
893 | |
894 | rcu_read_lock(); |
895 | ret = __xsk_sendmsg(sock, m, total_len); |
896 | rcu_read_unlock(); |
897 | |
898 | return ret; |
899 | } |
900 | |
901 | static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags) |
902 | { |
903 | bool need_wait = !(flags & MSG_DONTWAIT); |
904 | struct sock *sk = sock->sk; |
905 | struct xdp_sock *xs = xdp_sk(sk); |
906 | int err; |
907 | |
908 | err = xsk_check_common(xs); |
909 | if (err) |
910 | return err; |
911 | if (unlikely(!xs->rx)) |
912 | return -ENOBUFS; |
913 | if (unlikely(need_wait)) |
914 | return -EOPNOTSUPP; |
915 | |
916 | if (sk_can_busy_loop(sk)) |
917 | 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */ |
918 | |
919 | if (xsk_no_wakeup(sk)) |
920 | return 0; |
921 | |
922 | if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc) |
923 | return xsk_wakeup(xs, XDP_WAKEUP_RX); |
924 | return 0; |
925 | } |
926 | |
927 | static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags) |
928 | { |
929 | int ret; |
930 | |
931 | rcu_read_lock(); |
932 | ret = __xsk_recvmsg(sock, m, len, flags); |
933 | rcu_read_unlock(); |
934 | |
935 | return ret; |
936 | } |
937 | |
938 | static __poll_t xsk_poll(struct file *file, struct socket *sock, |
939 | struct poll_table_struct *wait) |
940 | { |
941 | __poll_t mask = 0; |
942 | struct sock *sk = sock->sk; |
943 | struct xdp_sock *xs = xdp_sk(sk); |
944 | struct xsk_buff_pool *pool; |
945 | |
946 | 	sock_poll_wait(file, sock, wait); |
947 | |
948 | rcu_read_lock(); |
949 | if (xsk_check_common(xs)) |
950 | goto skip_tx; |
951 | |
952 | pool = xs->pool; |
953 | |
954 | if (pool->cached_need_wakeup) { |
955 | if (xs->zc) |
956 | 			xsk_wakeup(xs, pool->cached_need_wakeup); |
957 | else if (xs->tx) |
958 | /* Poll needs to drive Tx also in copy mode */ |
959 | xsk_generic_xmit(sk); |
960 | } |
961 | |
962 | skip_tx: |
963 | 	if (xs->rx && !xskq_prod_is_empty(xs->rx)) |
964 | mask |= EPOLLIN | EPOLLRDNORM; |
965 | if (xs->tx && xsk_tx_writeable(xs)) |
966 | mask |= EPOLLOUT | EPOLLWRNORM; |
967 | |
968 | rcu_read_unlock(); |
969 | return mask; |
970 | } |
971 | |
972 | static int xsk_init_queue(u32 entries, struct xsk_queue **queue, |
973 | bool umem_queue) |
974 | { |
975 | struct xsk_queue *q; |
976 | |
977 | 	if (entries == 0 || *queue || !is_power_of_2(entries)) |
978 | return -EINVAL; |
979 | |
980 | 	q = xskq_create(entries, umem_queue); |
981 | if (!q) |
982 | return -ENOMEM; |
983 | |
984 | /* Make sure queue is ready before it can be seen by others */ |
985 | smp_wmb(); |
986 | WRITE_ONCE(*queue, q); |
987 | return 0; |
988 | } |
989 | |
990 | static void xsk_unbind_dev(struct xdp_sock *xs) |
991 | { |
992 | struct net_device *dev = xs->dev; |
993 | |
994 | if (xs->state != XSK_BOUND) |
995 | return; |
996 | WRITE_ONCE(xs->state, XSK_UNBOUND); |
997 | |
998 | /* Wait for driver to stop using the xdp socket. */ |
999 | 	xp_del_xsk(xs->pool, xs); |
1000 | synchronize_net(); |
1001 | dev_put(dev); |
1002 | } |
1003 | |
1004 | static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs, |
1005 | struct xdp_sock __rcu ***map_entry) |
1006 | { |
1007 | struct xsk_map *map = NULL; |
1008 | struct xsk_map_node *node; |
1009 | |
1010 | *map_entry = NULL; |
1011 | |
1012 | 	spin_lock_bh(&xs->map_list_lock); |
1013 | node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node, |
1014 | node); |
1015 | if (node) { |
1016 | 		bpf_map_inc(&node->map->map); |
1017 | map = node->map; |
1018 | *map_entry = node->map_entry; |
1019 | } |
1020 | 	spin_unlock_bh(&xs->map_list_lock); |
1021 | return map; |
1022 | } |
1023 | |
1024 | static void xsk_delete_from_maps(struct xdp_sock *xs) |
1025 | { |
1026 | /* This function removes the current XDP socket from all the |
1027 | * maps it resides in. We need to take extra care here, due to |
1028 | * the two locks involved. Each map has a lock synchronizing |
1029 | * updates to the entries, and each socket has a lock that |
1030 | * synchronizes access to the list of maps (map_list). For |
1031 | * deadlock avoidance the locks need to be taken in the order |
1032 | * "map lock"->"socket map list lock". We start off by |
1033 | * accessing the socket map list, and take a reference to the |
1034 | * map to guarantee existence between the |
1035 | * xsk_get_map_list_entry() and xsk_map_try_sock_delete() |
1036 | * calls. Then we ask the map to remove the socket, which |
1037 | * tries to remove the socket from the map. Note that there |
1038 | * might be updates to the map between |
1039 | * xsk_get_map_list_entry() and xsk_map_try_sock_delete(). |
1040 | */ |
1041 | struct xdp_sock __rcu **map_entry = NULL; |
1042 | struct xsk_map *map; |
1043 | |
1044 | 	while ((map = xsk_get_map_list_entry(xs, &map_entry))) { |
1045 | 		xsk_map_try_sock_delete(map, xs, map_entry); |
1046 | 		bpf_map_put(&map->map); |
1047 | } |
1048 | } |
1049 | |
1050 | static int xsk_release(struct socket *sock) |
1051 | { |
1052 | struct sock *sk = sock->sk; |
1053 | struct xdp_sock *xs = xdp_sk(sk); |
1054 | struct net *net; |
1055 | |
1056 | if (!sk) |
1057 | return 0; |
1058 | |
1059 | net = sock_net(sk); |
1060 | |
1061 | if (xs->skb) |
1062 | 		xsk_drop_skb(xs->skb); |
1063 | |
1064 | mutex_lock(&net->xdp.lock); |
1065 | sk_del_node_init_rcu(sk); |
1066 | 	mutex_unlock(&net->xdp.lock); |
1067 | |
1068 | 	sock_prot_inuse_add(net, sk->sk_prot, -1); |
1069 | |
1070 | xsk_delete_from_maps(xs); |
1071 | mutex_lock(&xs->mutex); |
1072 | xsk_unbind_dev(xs); |
1073 | 	mutex_unlock(&xs->mutex); |
1074 | |
1075 | 	xskq_destroy(xs->rx); |
1076 | 	xskq_destroy(xs->tx); |
1077 | 	xskq_destroy(xs->fq_tmp); |
1078 | 	xskq_destroy(xs->cq_tmp); |
1079 | |
1080 | sock_orphan(sk); |
1081 | sock->sk = NULL; |
1082 | |
1083 | sock_put(sk); |
1084 | |
1085 | return 0; |
1086 | } |
1087 | |
1088 | static struct socket *xsk_lookup_xsk_from_fd(int fd) |
1089 | { |
1090 | struct socket *sock; |
1091 | int err; |
1092 | |
1093 | 	sock = sockfd_lookup(fd, &err); |
1094 | 	if (!sock) |
1095 | 		return ERR_PTR(-ENOTSOCK); |
1096 | |
1097 | if (sock->sk->sk_family != PF_XDP) { |
1098 | sockfd_put(sock); |
1099 | 		return ERR_PTR(-ENOPROTOOPT); |
1100 | } |
1101 | |
1102 | return sock; |
1103 | } |
1104 | |
1105 | static bool xsk_validate_queues(struct xdp_sock *xs) |
1106 | { |
1107 | return xs->fq_tmp && xs->cq_tmp; |
1108 | } |
1109 | |
1110 | static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) |
1111 | { |
1112 | struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr; |
1113 | struct sock *sk = sock->sk; |
1114 | struct xdp_sock *xs = xdp_sk(sk); |
1115 | struct net_device *dev; |
1116 | int bound_dev_if; |
1117 | u32 flags, qid; |
1118 | int err = 0; |
1119 | |
1120 | if (addr_len < sizeof(struct sockaddr_xdp)) |
1121 | return -EINVAL; |
1122 | if (sxdp->sxdp_family != AF_XDP) |
1123 | return -EINVAL; |
1124 | |
1125 | flags = sxdp->sxdp_flags; |
1126 | if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY | |
1127 | XDP_USE_NEED_WAKEUP | XDP_USE_SG)) |
1128 | return -EINVAL; |
1129 | |
1130 | bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); |
1131 | if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex) |
1132 | return -EINVAL; |
1133 | |
1134 | rtnl_lock(); |
1135 | mutex_lock(&xs->mutex); |
1136 | if (xs->state != XSK_READY) { |
1137 | err = -EBUSY; |
1138 | goto out_release; |
1139 | } |
1140 | |
1141 | 	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex); |
1142 | if (!dev) { |
1143 | err = -ENODEV; |
1144 | goto out_release; |
1145 | } |
1146 | |
1147 | if (!xs->rx && !xs->tx) { |
1148 | err = -EINVAL; |
1149 | goto out_unlock; |
1150 | } |
1151 | |
1152 | qid = sxdp->sxdp_queue_id; |
1153 | |
1154 | if (flags & XDP_SHARED_UMEM) { |
1155 | struct xdp_sock *umem_xs; |
1156 | struct socket *sock; |
1157 | |
1158 | if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) || |
1159 | (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) { |
1160 | /* Cannot specify flags for shared sockets. */ |
1161 | err = -EINVAL; |
1162 | goto out_unlock; |
1163 | } |
1164 | |
1165 | if (xs->umem) { |
1166 | 			/* We already have our own. */ |
1167 | err = -EINVAL; |
1168 | goto out_unlock; |
1169 | } |
1170 | |
1171 | 		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd); |
1172 | 		if (IS_ERR(sock)) { |
1173 | 			err = PTR_ERR(sock); |
1174 | goto out_unlock; |
1175 | } |
1176 | |
1177 | 		umem_xs = xdp_sk(sock->sk); |
1178 | 		if (!xsk_is_bound(umem_xs)) { |
1179 | err = -EBADF; |
1180 | sockfd_put(sock); |
1181 | goto out_unlock; |
1182 | } |
1183 | |
1184 | if (umem_xs->queue_id != qid || umem_xs->dev != dev) { |
1185 | /* Share the umem with another socket on another qid |
1186 | * and/or device. |
1187 | */ |
1188 | 			xs->pool = xp_create_and_assign_umem(xs, |
1189 | 							     umem_xs->umem); |
1190 | if (!xs->pool) { |
1191 | err = -ENOMEM; |
1192 | sockfd_put(sock); |
1193 | goto out_unlock; |
1194 | } |
1195 | |
1196 | 			err = xp_assign_dev_shared(xs->pool, umem_xs, dev, |
1197 | 						   qid); |
1198 | 			if (err) { |
1199 | 				xp_destroy(xs->pool); |
1200 | xs->pool = NULL; |
1201 | sockfd_put(sock); |
1202 | goto out_unlock; |
1203 | } |
1204 | } else { |
1205 | /* Share the buffer pool with the other socket. */ |
1206 | if (xs->fq_tmp || xs->cq_tmp) { |
1207 | /* Do not allow setting your own fq or cq. */ |
1208 | err = -EINVAL; |
1209 | sockfd_put(sock); |
1210 | goto out_unlock; |
1211 | } |
1212 | |
1213 | 			xp_get_pool(umem_xs->pool); |
1214 | xs->pool = umem_xs->pool; |
1215 | |
1216 | /* If underlying shared umem was created without Tx |
1217 | * ring, allocate Tx descs array that Tx batching API |
1218 | * utilizes |
1219 | */ |
1220 | if (xs->tx && !xs->pool->tx_descs) { |
1221 | 				err = xp_alloc_tx_descs(xs->pool, xs); |
1222 | 				if (err) { |
1223 | 					xp_put_pool(xs->pool); |
1224 | xs->pool = NULL; |
1225 | sockfd_put(sock); |
1226 | goto out_unlock; |
1227 | } |
1228 | } |
1229 | } |
1230 | |
1231 | 		xdp_get_umem(umem_xs->umem); |
1232 | WRITE_ONCE(xs->umem, umem_xs->umem); |
1233 | sockfd_put(sock); |
1234 | } else if (!xs->umem || !xsk_validate_queues(xs)) { |
1235 | err = -EINVAL; |
1236 | goto out_unlock; |
1237 | } else { |
1238 | /* This xsk has its own umem. */ |
1239 | 		xs->pool = xp_create_and_assign_umem(xs, xs->umem); |
1240 | if (!xs->pool) { |
1241 | err = -ENOMEM; |
1242 | goto out_unlock; |
1243 | } |
1244 | |
1245 | 		err = xp_assign_dev(xs->pool, dev, qid, flags); |
1246 | 		if (err) { |
1247 | 			xp_destroy(xs->pool); |
1248 | xs->pool = NULL; |
1249 | goto out_unlock; |
1250 | } |
1251 | } |
1252 | |
1253 | /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */ |
1254 | xs->fq_tmp = NULL; |
1255 | xs->cq_tmp = NULL; |
1256 | |
1257 | xs->dev = dev; |
1258 | xs->zc = xs->umem->zc; |
1259 | xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG); |
1260 | xs->queue_id = qid; |
1261 | 	xp_add_xsk(xs->pool, xs); |
1262 | |
1263 | out_unlock: |
1264 | if (err) { |
1265 | dev_put(dev); |
1266 | } else { |
1267 | /* Matches smp_rmb() in bind() for shared umem |
1268 | * sockets, and xsk_is_bound(). |
1269 | */ |
1270 | smp_wmb(); |
1271 | WRITE_ONCE(xs->state, XSK_BOUND); |
1272 | } |
1273 | out_release: |
1274 | 	mutex_unlock(&xs->mutex); |
1275 | rtnl_unlock(); |
1276 | return err; |
1277 | } |
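/* Hedged usage sketch for XDP_SHARED_UMEM (not part of the original
 * sources): binding a second socket `second_fd` to the UMEM already owned
 * by the bound socket `first_fd`; the names are illustrative:
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue_id,
 *		.sxdp_flags = XDP_SHARED_UMEM,
 *		.sxdp_shared_umem_fd = first_fd,
 *	};
 *	bind(second_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 * When the queue id or device differs from the first socket, the second
 * socket also needs its own fill and completion rings registered first.
 */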
1278 | |
1279 | struct xdp_umem_reg_v1 { |
1280 | __u64 addr; /* Start of packet data area */ |
1281 | __u64 len; /* Length of packet data area */ |
1282 | __u32 chunk_size; |
1283 | __u32 headroom; |
1284 | }; |
1285 | |
1286 | static int xsk_setsockopt(struct socket *sock, int level, int optname, |
1287 | sockptr_t optval, unsigned int optlen) |
1288 | { |
1289 | struct sock *sk = sock->sk; |
1290 | struct xdp_sock *xs = xdp_sk(sk); |
1291 | int err; |
1292 | |
1293 | if (level != SOL_XDP) |
1294 | return -ENOPROTOOPT; |
1295 | |
1296 | switch (optname) { |
1297 | case XDP_RX_RING: |
1298 | case XDP_TX_RING: |
1299 | { |
1300 | struct xsk_queue **q; |
1301 | int entries; |
1302 | |
1303 | if (optlen < sizeof(entries)) |
1304 | return -EINVAL; |
1305 | 		if (copy_from_sockptr(&entries, optval, sizeof(entries))) |
1306 | return -EFAULT; |
1307 | |
1308 | mutex_lock(&xs->mutex); |
1309 | if (xs->state != XSK_READY) { |
1310 | 			mutex_unlock(&xs->mutex); |
1311 | return -EBUSY; |
1312 | } |
1313 | q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx; |
1314 | 		err = xsk_init_queue(entries, q, false); |
1315 | if (!err && optname == XDP_TX_RING) |
1316 | /* Tx needs to be explicitly woken up the first time */ |
1317 | xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; |
1318 | 		mutex_unlock(&xs->mutex); |
1319 | return err; |
1320 | } |
1321 | case XDP_UMEM_REG: |
1322 | { |
1323 | size_t mr_size = sizeof(struct xdp_umem_reg); |
1324 | struct xdp_umem_reg mr = {}; |
1325 | struct xdp_umem *umem; |
1326 | |
1327 | if (optlen < sizeof(struct xdp_umem_reg_v1)) |
1328 | return -EINVAL; |
1329 | else if (optlen < sizeof(mr)) |
1330 | mr_size = sizeof(struct xdp_umem_reg_v1); |
1331 | |
1332 | 		if (copy_from_sockptr(&mr, optval, mr_size)) |
1333 | return -EFAULT; |
1334 | |
1335 | mutex_lock(&xs->mutex); |
1336 | if (xs->state != XSK_READY || xs->umem) { |
1337 | 			mutex_unlock(&xs->mutex); |
1338 | return -EBUSY; |
1339 | } |
1340 | |
1341 | 		umem = xdp_umem_create(&mr); |
1342 | 		if (IS_ERR(umem)) { |
1343 | 			mutex_unlock(&xs->mutex); |
1344 | 			return PTR_ERR(umem); |
1345 | } |
1346 | |
1347 | /* Make sure umem is ready before it can be seen by others */ |
1348 | smp_wmb(); |
1349 | WRITE_ONCE(xs->umem, umem); |
1350 | 		mutex_unlock(&xs->mutex); |
1351 | return 0; |
1352 | } |
1353 | case XDP_UMEM_FILL_RING: |
1354 | case XDP_UMEM_COMPLETION_RING: |
1355 | { |
1356 | struct xsk_queue **q; |
1357 | int entries; |
1358 | |
1359 | 		if (copy_from_sockptr(&entries, optval, sizeof(entries))) |
1360 | return -EFAULT; |
1361 | |
1362 | mutex_lock(&xs->mutex); |
1363 | if (xs->state != XSK_READY) { |
1364 | 			mutex_unlock(&xs->mutex); |
1365 | return -EBUSY; |
1366 | } |
1367 | |
1368 | q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp : |
1369 | &xs->cq_tmp; |
1370 | 		err = xsk_init_queue(entries, q, true); |
1371 | 		mutex_unlock(&xs->mutex); |
1372 | return err; |
1373 | } |
1374 | default: |
1375 | break; |
1376 | } |
1377 | |
1378 | return -ENOPROTOOPT; |
1379 | } |
1380 | |
1381 | static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring) |
1382 | { |
1383 | ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer); |
1384 | ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer); |
1385 | ring->desc = offsetof(struct xdp_rxtx_ring, desc); |
1386 | } |
1387 | |
1388 | static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring) |
1389 | { |
1390 | ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer); |
1391 | ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer); |
1392 | ring->desc = offsetof(struct xdp_umem_ring, desc); |
1393 | } |
1394 | |
1395 | struct xdp_statistics_v1 { |
1396 | __u64 rx_dropped; |
1397 | __u64 rx_invalid_descs; |
1398 | __u64 tx_invalid_descs; |
1399 | }; |
1400 | |
1401 | static int xsk_getsockopt(struct socket *sock, int level, int optname, |
1402 | char __user *optval, int __user *optlen) |
1403 | { |
1404 | struct sock *sk = sock->sk; |
1405 | struct xdp_sock *xs = xdp_sk(sk); |
1406 | int len; |
1407 | |
1408 | if (level != SOL_XDP) |
1409 | return -ENOPROTOOPT; |
1410 | |
1411 | if (get_user(len, optlen)) |
1412 | return -EFAULT; |
1413 | if (len < 0) |
1414 | return -EINVAL; |
1415 | |
1416 | switch (optname) { |
1417 | case XDP_STATISTICS: |
1418 | { |
1419 | struct xdp_statistics stats = {}; |
1420 | 		bool extra_stats = true; |
1421 | size_t stats_size; |
1422 | |
1423 | if (len < sizeof(struct xdp_statistics_v1)) { |
1424 | return -EINVAL; |
1425 | } else if (len < sizeof(stats)) { |
1426 | extra_stats = false; |
1427 | stats_size = sizeof(struct xdp_statistics_v1); |
1428 | } else { |
1429 | stats_size = sizeof(stats); |
1430 | } |
1431 | |
1432 | mutex_lock(&xs->mutex); |
1433 | stats.rx_dropped = xs->rx_dropped; |
1434 | if (extra_stats) { |
1435 | stats.rx_ring_full = xs->rx_queue_full; |
1436 | stats.rx_fill_ring_empty_descs = |
1437 | 				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0; |
1438 | 			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx); |
1439 | } else { |
1440 | stats.rx_dropped += xs->rx_queue_full; |
1441 | } |
1442 | 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx); |
1443 | 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx); |
1444 | 		mutex_unlock(&xs->mutex); |
1445 | |
1446 | 		if (copy_to_user(optval, &stats, stats_size)) |
1447 | return -EFAULT; |
1448 | if (put_user(stats_size, optlen)) |
1449 | return -EFAULT; |
1450 | |
1451 | return 0; |
1452 | } |
1453 | case XDP_MMAP_OFFSETS: |
1454 | { |
1455 | struct xdp_mmap_offsets off; |
1456 | struct xdp_mmap_offsets_v1 off_v1; |
1457 | bool flags_supported = true; |
1458 | void *to_copy; |
1459 | |
1460 | if (len < sizeof(off_v1)) |
1461 | return -EINVAL; |
1462 | else if (len < sizeof(off)) |
1463 | flags_supported = false; |
1464 | |
1465 | if (flags_supported) { |
1466 | /* xdp_ring_offset is identical to xdp_ring_offset_v1 |
1467 | * except for the flags field added to the end. |
1468 | */ |
1469 | 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *) |
1470 | 					       &off.rx); |
1471 | 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *) |
1472 | 					       &off.tx); |
1473 | 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *) |
1474 | 					       &off.fr); |
1475 | 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *) |
1476 | &off.cr); |
1477 | off.rx.flags = offsetof(struct xdp_rxtx_ring, |
1478 | ptrs.flags); |
1479 | off.tx.flags = offsetof(struct xdp_rxtx_ring, |
1480 | ptrs.flags); |
1481 | off.fr.flags = offsetof(struct xdp_umem_ring, |
1482 | ptrs.flags); |
1483 | off.cr.flags = offsetof(struct xdp_umem_ring, |
1484 | ptrs.flags); |
1485 | |
1486 | len = sizeof(off); |
1487 | to_copy = &off; |
1488 | } else { |
1489 | 			xsk_enter_rxtx_offsets(&off_v1.rx); |
1490 | 			xsk_enter_rxtx_offsets(&off_v1.tx); |
1491 | 			xsk_enter_umem_offsets(&off_v1.fr); |
1492 | 			xsk_enter_umem_offsets(&off_v1.cr); |
1493 | |
1494 | len = sizeof(off_v1); |
1495 | to_copy = &off_v1; |
1496 | } |
1497 | |
1498 | 		if (copy_to_user(optval, to_copy, len)) |
1499 | return -EFAULT; |
1500 | if (put_user(len, optlen)) |
1501 | return -EFAULT; |
1502 | |
1503 | return 0; |
1504 | } |
1505 | case XDP_OPTIONS: |
1506 | { |
1507 | struct xdp_options opts = {}; |
1508 | |
1509 | if (len < sizeof(opts)) |
1510 | return -EINVAL; |
1511 | |
1512 | mutex_lock(&xs->mutex); |
1513 | if (xs->zc) |
1514 | opts.flags |= XDP_OPTIONS_ZEROCOPY; |
1515 | 		mutex_unlock(&xs->mutex); |
1516 | |
1517 | len = sizeof(opts); |
1518 | 		if (copy_to_user(optval, &opts, len)) |
1519 | return -EFAULT; |
1520 | if (put_user(len, optlen)) |
1521 | return -EFAULT; |
1522 | |
1523 | return 0; |
1524 | } |
1525 | default: |
1526 | break; |
1527 | } |
1528 | |
1529 | return -EOPNOTSUPP; |
1530 | } |
1531 | |
1532 | static int xsk_mmap(struct file *file, struct socket *sock, |
1533 | struct vm_area_struct *vma) |
1534 | { |
1535 | loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; |
1536 | unsigned long size = vma->vm_end - vma->vm_start; |
1537 | 	struct xdp_sock *xs = xdp_sk(sock->sk); |
1538 | int state = READ_ONCE(xs->state); |
1539 | struct xsk_queue *q = NULL; |
1540 | |
1541 | if (state != XSK_READY && state != XSK_BOUND) |
1542 | return -EBUSY; |
1543 | |
1544 | if (offset == XDP_PGOFF_RX_RING) { |
1545 | q = READ_ONCE(xs->rx); |
1546 | } else if (offset == XDP_PGOFF_TX_RING) { |
1547 | q = READ_ONCE(xs->tx); |
1548 | } else { |
1549 | /* Matches the smp_wmb() in XDP_UMEM_REG */ |
1550 | smp_rmb(); |
1551 | if (offset == XDP_UMEM_PGOFF_FILL_RING) |
1552 | q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) : |
1553 | READ_ONCE(xs->pool->fq); |
1554 | else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) |
1555 | q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) : |
1556 | READ_ONCE(xs->pool->cq); |
1557 | } |
1558 | |
1559 | if (!q) |
1560 | return -EINVAL; |
1561 | |
1562 | /* Matches the smp_wmb() in xsk_init_queue */ |
1563 | smp_rmb(); |
1564 | if (size > q->ring_vmalloc_size) |
1565 | return -EINVAL; |
1566 | |
1567 | 	return remap_vmalloc_range(vma, q->ring, 0); |
1568 | } |
1569 | |
1570 | static int xsk_notifier(struct notifier_block *this, |
1571 | unsigned long msg, void *ptr) |
1572 | { |
1573 | 	struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
1574 | struct net *net = dev_net(dev); |
1575 | struct sock *sk; |
1576 | |
1577 | switch (msg) { |
1578 | case NETDEV_UNREGISTER: |
1579 | mutex_lock(&net->xdp.lock); |
1580 | sk_for_each(sk, &net->xdp.list) { |
1581 | struct xdp_sock *xs = xdp_sk(sk); |
1582 | |
1583 | mutex_lock(&xs->mutex); |
1584 | if (xs->dev == dev) { |
1585 | sk->sk_err = ENETDOWN; |
1586 | 				if (!sock_flag(sk, SOCK_DEAD)) |
1587 | sk_error_report(sk); |
1588 | |
1589 | xsk_unbind_dev(xs); |
1590 | |
1591 | /* Clear device references. */ |
1592 | 				xp_clear_dev(xs->pool); |
1593 | 			} |
1594 | 			mutex_unlock(&xs->mutex); |
1595 | 		} |
1596 | 		mutex_unlock(&net->xdp.lock); |
1597 | break; |
1598 | } |
1599 | return NOTIFY_DONE; |
1600 | } |
1601 | |
1602 | static struct proto xsk_proto = { |
1603 | 	.name = "XDP", |
1604 | .owner = THIS_MODULE, |
1605 | .obj_size = sizeof(struct xdp_sock), |
1606 | }; |
1607 | |
1608 | static const struct proto_ops xsk_proto_ops = { |
1609 | .family = PF_XDP, |
1610 | .owner = THIS_MODULE, |
1611 | .release = xsk_release, |
1612 | .bind = xsk_bind, |
1613 | .connect = sock_no_connect, |
1614 | .socketpair = sock_no_socketpair, |
1615 | .accept = sock_no_accept, |
1616 | .getname = sock_no_getname, |
1617 | .poll = xsk_poll, |
1618 | .ioctl = sock_no_ioctl, |
1619 | .listen = sock_no_listen, |
1620 | .shutdown = sock_no_shutdown, |
1621 | .setsockopt = xsk_setsockopt, |
1622 | .getsockopt = xsk_getsockopt, |
1623 | .sendmsg = xsk_sendmsg, |
1624 | .recvmsg = xsk_recvmsg, |
1625 | .mmap = xsk_mmap, |
1626 | }; |
1627 | |
1628 | static void xsk_destruct(struct sock *sk) |
1629 | { |
1630 | struct xdp_sock *xs = xdp_sk(sk); |
1631 | |
1632 | 	if (!sock_flag(sk, SOCK_DEAD)) |
1633 | return; |
1634 | |
1635 | 	if (!xp_put_pool(xs->pool)) |
1636 | 		xdp_put_umem(xs->umem, !xs->pool); |
1637 | } |
1638 | |
1639 | static int xsk_create(struct net *net, struct socket *sock, int protocol, |
1640 | int kern) |
1641 | { |
1642 | struct xdp_sock *xs; |
1643 | struct sock *sk; |
1644 | |
1645 | 	if (!ns_capable(net->user_ns, CAP_NET_RAW)) |
1646 | return -EPERM; |
1647 | if (sock->type != SOCK_RAW) |
1648 | return -ESOCKTNOSUPPORT; |
1649 | |
1650 | if (protocol) |
1651 | return -EPROTONOSUPPORT; |
1652 | |
1653 | sock->state = SS_UNCONNECTED; |
1654 | |
1655 | 	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern); |
1656 | if (!sk) |
1657 | return -ENOBUFS; |
1658 | |
1659 | sock->ops = &xsk_proto_ops; |
1660 | |
1661 | sock_init_data(sock, sk); |
1662 | |
1663 | sk->sk_family = PF_XDP; |
1664 | |
1665 | sk->sk_destruct = xsk_destruct; |
1666 | |
1667 | 	sock_set_flag(sk, SOCK_RCU_FREE); |
1668 | |
1669 | xs = xdp_sk(sk); |
1670 | xs->state = XSK_READY; |
1671 | mutex_init(&xs->mutex); |
1672 | spin_lock_init(&xs->rx_lock); |
1673 | |
1674 | 	INIT_LIST_HEAD(&xs->map_list); |
1675 | spin_lock_init(&xs->map_list_lock); |
1676 | |
1677 | mutex_lock(&net->xdp.lock); |
1678 | 	sk_add_node_rcu(sk, &net->xdp.list); |
1679 | 	mutex_unlock(&net->xdp.lock); |
1680 | |
1681 | 	sock_prot_inuse_add(net, &xsk_proto, 1); |
1682 | |
1683 | return 0; |
1684 | } |
1685 | |
1686 | static const struct net_proto_family xsk_family_ops = { |
1687 | .family = PF_XDP, |
1688 | .create = xsk_create, |
1689 | .owner = THIS_MODULE, |
1690 | }; |
1691 | |
1692 | static struct notifier_block xsk_netdev_notifier = { |
1693 | .notifier_call = xsk_notifier, |
1694 | }; |
1695 | |
1696 | static int __net_init xsk_net_init(struct net *net) |
1697 | { |
1698 | mutex_init(&net->xdp.lock); |
1699 | INIT_HLIST_HEAD(&net->xdp.list); |
1700 | return 0; |
1701 | } |
1702 | |
1703 | static void __net_exit xsk_net_exit(struct net *net) |
1704 | { |
1705 | WARN_ON_ONCE(!hlist_empty(&net->xdp.list)); |
1706 | } |
1707 | |
1708 | static struct pernet_operations xsk_net_ops = { |
1709 | .init = xsk_net_init, |
1710 | .exit = xsk_net_exit, |
1711 | }; |
1712 | |
1713 | static int __init xsk_init(void) |
1714 | { |
1715 | int err, cpu; |
1716 | |
1717 | 	err = proto_register(&xsk_proto, 0 /* no slab */); |
1718 | if (err) |
1719 | goto out; |
1720 | |
1721 | 	err = sock_register(&xsk_family_ops); |
1722 | if (err) |
1723 | goto out_proto; |
1724 | |
1725 | err = register_pernet_subsys(&xsk_net_ops); |
1726 | if (err) |
1727 | goto out_sk; |
1728 | |
1729 | 	err = register_netdevice_notifier(&xsk_netdev_notifier); |
1730 | if (err) |
1731 | goto out_pernet; |
1732 | |
1733 | for_each_possible_cpu(cpu) |
1734 | 		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu)); |
1735 | return 0; |
1736 | |
1737 | out_pernet: |
1738 | unregister_pernet_subsys(&xsk_net_ops); |
1739 | out_sk: |
1740 | sock_unregister(PF_XDP); |
1741 | out_proto: |
1742 | 	proto_unregister(&xsk_proto); |
1743 | out: |
1744 | return err; |
1745 | } |
1746 | |
1747 | fs_initcall(xsk_init); |
1748 | |