1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* Interface for implementing AF_XDP zero-copy support in drivers. |
3 | * Copyright(c) 2020 Intel Corporation. |
4 | */ |
5 | |
6 | #ifndef _LINUX_XDP_SOCK_DRV_H |
7 | #define _LINUX_XDP_SOCK_DRV_H |
8 | |
9 | #include <net/xdp_sock.h> |
10 | #include <net/xsk_buff_pool.h> |
11 | |
12 | #define XDP_UMEM_MIN_CHUNK_SHIFT 11 |
13 | #define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT) |
14 | |
/* Describes driver data to be copied into a buffer's control-block area
 * via xsk_pool_fill_cb()/xp_fill_cb().
 */
struct xsk_cb_desc {
	void *src;	/* source bytes to copy — presumably into each buffer's cb; confirm in xp_fill_cb() */
	u8 off;		/* NOTE(review): looks like a byte offset within the cb area — verify */
	u8 bytes;	/* number of bytes to copy from @src */
};
20 | |
21 | #ifdef CONFIG_XDP_SOCKETS |
22 | |
/* Out-of-line driver-facing API, implemented in net/xdp/xsk.c:
 * Tx descriptor completion/peek/release, pool lookup by queue id,
 * and the need_wakeup flag accessors.
 */
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
34 | |
/* Total Rx headroom for buffers of this pool: the mandatory
 * XDP_PACKET_HEADROOM plus the user-configured pool headroom.
 */
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}
39 | |
/* Size in bytes of one umem chunk (i.e. one buffer) in this pool. */
static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}
44 | |
45 | static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool) |
46 | { |
47 | return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool); |
48 | } |
49 | |
/* Associate the driver's Rx queue info with all buffers in the pool.
 * Thin wrapper around xp_set_rxq_info().
 */
static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}
55 | |
/* Copy driver-private data described by @desc into the pool's buffers.
 * Thin wrapper around xp_fill_cb().
 */
static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}
61 | |
/* NAPI id of the queue this pool is bound to, read from the rxq info of
 * the first buffer; 0 when busy polling is not compiled in.
 */
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return pool->heads[0].xdp.rxq->napi_id;
#else
	return 0;
#endif
}
70 | |
/* Undo xsk_pool_dma_map(): release the pool's DMA mappings.
 * @attrs are DMA attributes forwarded to xp_dma_unmap().
 */
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}
76 | |
77 | static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool, |
78 | struct device *dev, unsigned long attrs) |
79 | { |
80 | struct xdp_umem *umem = pool->umem; |
81 | |
82 | return xp_dma_map(pool, dev, attrs, pages: umem->pgs, nr_pages: umem->npgs); |
83 | } |
84 | |
/* DMA address of the data area of an xsk-backed xdp_buff.
 * Recovers the enclosing xdp_buff_xsk and delegates to xp_get_dma().
 */
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}
91 | |
/* DMA address of the start of the frame (including headroom) of an
 * xsk-backed xdp_buff. Delegates to xp_get_frame_dma().
 */
static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}
98 | |
/* Allocate one buffer from the pool's fill queue, or NULL if none is
 * available. Thin wrapper around xp_alloc().
 */
static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}
103 | |
/* True if @desc is the last (end-of-packet) descriptor of a frame,
 * i.e. it does not carry the multi-buffer continuation flag.
 */
static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}
108 | |
/* Batch-allocate buffers into @xdp. Returns as many entries as possible
 * up to max. 0 <= N <= max.
 */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}
114 | |
/* True if at least @count buffers can currently be allocated from the
 * pool. Thin wrapper around xp_can_alloc().
 */
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}
119 | |
120 | static inline void xsk_buff_free(struct xdp_buff *xdp) |
121 | { |
122 | struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); |
123 | struct list_head *xskb_list = &xskb->pool->xskb_list; |
124 | struct xdp_buff_xsk *pos, *tmp; |
125 | |
126 | if (likely(!xdp_buff_has_frags(xdp))) |
127 | goto out; |
128 | |
129 | list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) { |
130 | list_del(entry: &pos->xskb_list_node); |
131 | xp_free(xskb: pos); |
132 | } |
133 | |
134 | xdp_get_shared_info_from_buff(xdp)->nr_frags = 0; |
135 | out: |
136 | xp_free(xskb); |
137 | } |
138 | |
139 | static inline void xsk_buff_add_frag(struct xdp_buff *xdp) |
140 | { |
141 | struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp); |
142 | |
143 | list_add_tail(new: &frag->xskb_list_node, head: &frag->pool->xskb_list); |
144 | } |
145 | |
146 | static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) |
147 | { |
148 | struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp); |
149 | struct xdp_buff *ret = NULL; |
150 | struct xdp_buff_xsk *frag; |
151 | |
152 | frag = list_first_entry_or_null(&xskb->pool->xskb_list, |
153 | struct xdp_buff_xsk, xskb_list_node); |
154 | if (frag) { |
155 | list_del(entry: &frag->xskb_list_node); |
156 | ret = &frag->xdp; |
157 | } |
158 | |
159 | return ret; |
160 | } |
161 | |
162 | static inline void xsk_buff_del_tail(struct xdp_buff *tail) |
163 | { |
164 | struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp); |
165 | |
166 | list_del(entry: &xskb->xskb_list_node); |
167 | } |
168 | |
/* Return the last fragment currently queued on the pool's xskb_list for
 * the frame headed by @first, without unlinking it.
 *
 * NOTE(review): list_last_entry() assumes the list is non-empty — the
 * caller must only use this on a frame that has fragments.
 */
static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       xskb_list_node);
	return &frag->xdp;
}
178 | |
179 | static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) |
180 | { |
181 | xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; |
182 | xdp->data_meta = xdp->data; |
183 | xdp->data_end = xdp->data + size; |
184 | xdp->flags = 0; |
185 | } |
186 | |
/* DMA address corresponding to the raw umem address @addr.
 * Thin wrapper around xp_raw_get_dma().
 */
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}
192 | |
/* Kernel virtual address corresponding to the raw umem address @addr.
 * Thin wrapper around xp_raw_get_data().
 */
static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}
197 | |
/* Mask of all Tx metadata flags the kernel understands; the trailing 0
 * lets each entry end with '|' so new flags can be appended cleanly.
 */
#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
		0)

/* True if @meta carries only flags the kernel knows about. */
static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}
207 | |
/* Locate the Tx metadata that precedes the packet data at umem address
 * @addr. Returns NULL when the pool has no metadata configured or the
 * flags in the metadata are invalid (there is no way to signal the
 * error to the user, so invalid metadata is silently ignored).
 */
static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	/* Metadata sits tx_metadata_len bytes before the data area. */
	meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}
221 | |
/* Sync the buffer's DMA region for CPU access before the driver reads
 * the received data. Skipped entirely when the pool's mappings never
 * need syncing (pool->dma_need_sync is false).
 */
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu(xskb);
}
231 | |
/* Sync @size bytes at DMA address @dma for device access before the
 * device transmits from the buffer. Wrapper around
 * xp_dma_sync_for_device().
 */
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
238 | |
239 | #else |
240 | |
/* CONFIG_XDP_SOCKETS=n stubs: Tx completion, pool lookup and the
 * need_wakeup accessors all become no-ops / "nothing available" so
 * drivers compile unchanged without AF_XDP support.
 */
static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}
286 | |
/* CONFIG_XDP_SOCKETS=n stubs: pool geometry reports zero, DMA mapping
 * trivially succeeds, and buffer allocation always fails.
 */
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}
357 | |
/* CONFIG_XDP_SOCKETS=n stubs: buffer free/frag handling, raw address
 * translation, Tx metadata and DMA syncs all collapse to no-ops.
 */
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}
414 | |
415 | #endif /* CONFIG_XDP_SOCKETS */ |
416 | |
417 | #endif /* _LINUX_XDP_SOCK_DRV_H */ |
418 | |