// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool/helpers.h>

#include <net/hotdata.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_free(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ERR_PTR(ret);
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_free(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing faster recycling
 * of xdp_frames/pages in those cases.
 */
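/* A minimal illustrative sketch (not from a specific driver) of how the
 * two public wrappers below map onto @napi_direct; the call sites named
 * here are hypothetical:
 *
 *	// Running inside NAPI protection (e.g. a drop handled directly
 *	// in the driver's poll routine): direct recycling is allowed.
 *	xdp_return_frame_rx_napi(xdpf);	// napi_direct == true
 *
 *	// Any other context: take the safe, non-direct path.
 *	xdp_return_frame(xdpf);		// napi_direct == false
 */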
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
		 * as mem->type knows this is a page_pool page
		 */
		page_pool_put_full_page(page->pp, page, napi_direct);
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
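/* A minimal usage sketch of the bulk API (an assumed caller-side pattern,
 * not lifted from a specific driver): the xdp_frame_bulk lives on the
 * stack, frames are queued under rcu_read_lock(), and a final flush
 * returns any remainder. xdp_frame_bulk_init() comes from <net/xdp.h>;
 * "frames" and "n" are hypothetical locals.
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock(); // required by xdp_return_frame_bulk()
 *	for (i = 0; i < n; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq); // flush whatever is still queued
 *	rcu_read_unlock();
 */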
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_address(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
	}
out:
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	if (xdpf->mem.type == MEM_TYPE_PAGE_POOL)
		skb_mark_for_recycle(skb);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}

__bpf_kfunc_start_defs();

/**
 * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
 * @ctx: XDP context pointer.
 * @timestamp: Return value pointer.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
 * * ``-ENODATA``    : means no RX-timestamp available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	return -EOPNOTSUPP;
}
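
/* A minimal BPF-program-side sketch (a hypothetical program, not part of
 * this file) showing how the RX metadata kfuncs are consumed; it assumes
 * libbpf's SEC()/__ksym/__weak conventions and simply treats -EOPNOTSUPP
 * and -ENODATA as "no timestamp":
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym __weak;
 *
 *	SEC("xdp")
 *	int rx_prog(struct xdp_md *ctx)
 *	{
 *		__u64 ts = 0;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			; // ts now holds the HW RX timestamp
 *		return XDP_PASS;
 *	}
 */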

/**
 * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
 * @ctx: XDP context pointer.
 * @hash: Return value pointer.
 * @rss_type: Return value pointer for RSS type.
 *
 * The RSS hash type (@rss_type) specifies which portion of packet headers the
 * NIC hardware used when calculating the RSS hash value. The RSS type can be
 * decoded via &enum xdp_rss_hash_type, either by matching on individual L3/L4
 * bits ``XDP_RSS_L*`` or by the combined traditional *RSS Hashing Types*
 * ``XDP_RSS_TYPE_L*``.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
 * * ``-ENODATA``    : means no RX-hash available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
					 enum xdp_rss_hash_type *rss_type)
{
	return -EOPNOTSUPP;
}

/**
 * bpf_xdp_metadata_rx_vlan_tag - Get XDP packet outermost VLAN tag
 * @ctx: XDP context pointer.
 * @vlan_proto: Destination pointer for VLAN Tag protocol identifier (TPID).
 * @vlan_tci: Destination pointer for VLAN TCI (VID + DEI + PCP)
 *
 * In case of success, ``vlan_proto`` contains *Tag protocol identifier (TPID)*,
 * usually ``ETH_P_8021Q`` or ``ETH_P_8021AD``, but some networks can use
 * custom TPIDs. ``vlan_proto`` is stored in **network byte order (BE)**
 * and should be used as follows:
 * ``if (vlan_proto == bpf_htons(ETH_P_8021Q)) do_something();``
 *
 * ``vlan_tci`` contains the remaining 16 bits of a VLAN tag.
 * Driver is expected to provide those in **host byte order (usually LE)**,
 * so the bpf program should not perform byte conversion.
 * According to 802.1Q standard, *VLAN TCI (Tag control information)*
 * is a bit field that contains:
 * *VLAN identifier (VID)* that can be read with ``vlan_tci & 0xfff``,
 * *Drop eligible indicator (DEI)* - 1 bit,
 * *Priority code point (PCP)* - 3 bits.
 * For detailed meaning of DEI and PCP, please refer to other sources.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : device driver doesn't implement kfunc
 * * ``-ENODATA``    : VLAN tag was not stripped or is not available
 */
__bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					     __be16 *vlan_proto, u16 *vlan_tci)
{
	return -EOPNOTSUPP;
}
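
/* A small illustrative decode of the TCI layout described above (sketch
 * only; the local names are hypothetical):
 *
 *	__u16 vid = vlan_tci & 0x0fff;		// VLAN identifier (VID)
 *	__u8  dei = (vlan_tci >> 12) & 0x1;	// drop eligible indicator
 *	__u8  pcp = (vlan_tci >> 13) & 0x7;	// priority code point
 */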

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_KFUNCS_END(xdp_metadata_kfunc_ids)

static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &xdp_metadata_kfunc_ids,
};

BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
#define XDP_METADATA_KFUNC(name, _, str, __) BTF_ID(func, str)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

u32 bpf_xdp_metadata_kfunc_id(int id)
{
	/* xdp_metadata_kfunc_ids is sorted and can't be used */
	return xdp_metadata_kfunc_ids_unsorted[id];
}

bool bpf_dev_bound_kfunc_id(u32 btf_id)
{
	return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
}

static int __init xdp_metadata_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
}
late_initcall(xdp_metadata_init);

void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
	val &= NETDEV_XDP_ACT_MASK;
	if (dev->xdp_features == val)
		return;

	dev->xdp_features = val;

	if (dev->reg_state == NETREG_REGISTERED)
		call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL_GPL(xdp_set_features_flag);

void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
	xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);

	if (support_sg)
		val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);

void xdp_features_clear_redirect_target(struct net_device *dev)
{
	xdp_features_t val = dev->xdp_features;

	val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);