/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

#define XDP_UMEM_SG_FLAG (1 << 1)

struct net_device;
struct xsk_queue;
struct xdp_buff;

struct xdp_umem {
	void *addrs;
	u64 size;
	u32 headroom;
	u32 chunk_size;
	u32 chunks;
	u32 npgs;
	struct user_struct *user;
	refcount_t users;
	u8 flags;
	bool zc;
	struct page **pgs;
	int id;
	struct list_head xsk_dma_list;
	struct work_struct work;
};
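
/* Illustrative sketch, not part of the upstream header: in aligned mode
 * (XDP_UMEM_UNALIGNED_CHUNK_FLAG not set) the registered area of @size bytes
 * is split into @chunks frames of @chunk_size bytes (a power of two), backed
 * by @npgs pinned pages, with @headroom bytes reserved in each frame. The
 * helper below is hypothetical and only shows how a umem address maps to the
 * start of its chunk under that assumption.
 */
static inline u64 xdp_umem_chunk_start_sketch(const struct xdp_umem *umem,
					      u64 addr)
{
	/* chunk_size is a power of two in aligned mode, so masking suffices */
	return addr & ~((u64)umem->chunk_size - 1);
}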

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	atomic_t count;
	struct xdp_sock __rcu *xsk_map[];
};
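
/* Illustrative sketch, not part of the upstream header: how an entry of the
 * flexible xsk_map[] array would be fetched. The function name is
 * hypothetical; the real lookup is __xsk_map_lookup_elem() in
 * net/xdp/xskmap.c. The caller is assumed to be in an RCU read-side critical
 * section, which is what makes the rcu_dereference() below legitimate.
 */
static inline struct xdp_sock *xsk_map_entry_sketch(struct xsk_map *m, u32 key)
{
	if (key >= m->map.max_entries)
		return NULL;

	return rcu_dereference(m->xsk_map[key]);
}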

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	struct xsk_buff_pool *pool;
	u16 queue_id;
	bool zc;
	bool sg;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;
	/* Number of tx descriptors sent by this xsk. Once it exceeds
	 * MAX_PER_SOCKET_BUDGET, other xsks must be given a chance to send
	 * their tx descriptors, so that no xsk is starved.
	 */
	u32 tx_budget_spent;

	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	/* When __xsk_generic_xmit() must return before it sees the EOP
	 * descriptor for the current packet, the partially built skb is saved
	 * here so that packet building can resume in the next call of
	 * __xsk_generic_xmit().
	 */
	struct sk_buff *skb;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
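
/* Illustrative sketch, not part of the upstream header: data-path code only
 * touches the rings of a socket that has reached XSK_BOUND. A simplified,
 * hypothetical version of the xsk_is_bound() check in net/xdp/xsk.c, pairing
 * a READ_ONCE() of @state with the write-side ordering of bind():
 */
static inline bool xdp_sock_bound_sketch(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* matches the smp_wmb() issued when binding completes */
		smp_rmb();
		return true;
	}
	return false;
}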

#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

#endif /* CONFIG_XDP_SOCKETS */
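
/* Illustrative sketch, not part of the upstream header: how the helpers above
 * fit together. xsk_generic_rcv() copies a frame straight into the rx ring on
 * the generic (skb) XDP path, while on the native path __xsk_map_redirect()
 * queues the socket on a per-CPU flush list and __xsk_map_flush() completes
 * delivery, normally once per NAPI poll via xdp_do_flush(). The hypothetical
 * helper below shows that ordering for a single frame; real drivers batch the
 * flush instead of issuing it per frame.
 */
static inline int xsk_redirect_and_flush_sketch(struct xdp_sock *xs,
						struct xdp_buff *xdp)
{
	int err;

	err = __xsk_map_redirect(xs, xdp);	/* queue on this CPU's flush list */
	if (!err)
		__xsk_map_flush();		/* deliver everything queued so far */

	return err;
}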

#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
bool xsk_map_check_flush(void);
#else
static inline bool xsk_map_check_flush(void)
{
	return false;
}
#endif

#endif /* _LINUX_XDP_SOCK_H */