// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
#include <net/page_pool/helpers.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
#define VETH_XDP_BATCH		16

struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
	struct page_pool	*page_pool;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

struct veth_xdp_buff {
	struct xdp_buff xdp;
	struct sk_buff *skb;
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	u8 *p = buf;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++)
			for (j = 0; j < VETH_RQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "rx_queue_%u_%.18s",
						i, veth_rq_stats_desc[j].desc);

		for (i = 0; i < dev->real_num_tx_queues; i++)
			for (j = 0; j < VETH_TQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "tx_queue_%u_%.18s",
						i, veth_tq_stats_desc[j].desc);

		page_pool_ethtool_stats_get_strings(p);
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues +
		       page_pool_ethtool_stats_get_count();
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_page_pool_stats(struct net_device *dev, u64 *data)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct veth_priv *priv = netdev_priv(dev);
	struct page_pool_stats pp_stats = {};
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		if (!priv->rq[i].page_pool)
			continue;
		page_pool_get_stats(priv->rq[i].page_pool, &pp_stats);
	}
	page_pool_ethtool_stats_get(data, &pp_stats);
#endif /* CONFIG_PAGE_POOL_STATS */
}

static void veth_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx, pp_idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}
	pp_idx = idx;

	if (!peer)
		goto page_pool_stats;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
	}
	pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;

page_pool_stats:
	veth_get_page_pool_stats(dev, &data[pp_idx]);
}

static void veth_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	channels->tx_count = dev->real_num_tx_queues;
	channels->rx_count = dev->real_num_rx_queues;
	channels->max_tx = dev->num_tx_queues;
	channels->max_rx = dev->num_rx_queues;
}

static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch);

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_channels		= veth_get_channels,
	.set_channels		= veth_set_channels,
};

/* general routines */

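/* Both sk_buffs and xdp_frames travel through the same per-queue ptr_ring.
 * xdp_frame pointers are at least word aligned, so the low bit is free to
 * act as a type tag (VETH_XDP_FLAG).
 */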
static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

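/* Kick the consumer side NAPI, unless notifications are masked because a
 * previously scheduled poll has not completed yet.
 */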
static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!READ_ONCE(rq->rx_notify_masked) &&
	    napi_schedule_prep(&rq->xdp_napi)) {
		WRITE_ONCE(rq->rx_notify_masked, true);
		__napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		__netif_rx(skb);
}

/* Return true if the specified skb has chances of GRO aggregation.
 * Don't strive for accuracy, but try to avoid GRO overhead in the most
 * common scenarios.
 * When XDP is enabled, all traffic is considered eligible, as the xmit
 * device has TSO off.
 * When TSO is enabled on the xmit device, we are likely interested only
 * in UDP aggregation, so explicitly check for that if the skb is suspected
 * to belong to locally generated UDP traffic (the sock_wfree destructor
 * is used by UDP, ICMP and XDP sockets).
 */
static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
					 const struct net_device *rcv,
					 const struct sk_buff *skb)
{
	return !(dev->features & NETIF_F_ALL_TSO) ||
		(skb->destructor == sock_wfree &&
		 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
}

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	int ret = NETDEV_TX_OK;
	struct net_device *rcv;
	int length = skb->len;
	bool use_napi = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];

		/* The napi pointer is available when an XDP program is
		 * attached or when GRO is enabled.
		 * Don't bother with napi/GRO if the skb can't be aggregated.
		 */
		use_napi = rcu_access_pointer(rq->napi) &&
			   veth_skb_is_eligible_for_gro(dev, rcv, skb);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
		if (!use_napi)
			dev_sw_netstats_tx_add(dev, 1, length);
		else
			__veth_xdp_flush(rq);
	} else {
drop:
		atomic64_inc(&priv->dropped);
		ret = NET_XMIT_DROP;
	}

	rcu_read_unlock();

	return ret;
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}

static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;

	tot->tx_dropped = atomic64_read(&priv->dropped);
	dev_fetch_sw_netstats(tot, dev->tstats);

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes += rx.xdp_bytes;
	tot->rx_packets += rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		struct rtnl_link_stats64 tot_peer = {};

		dev_fetch_sw_netstats(&tot_peer, peer->tstats);
		tot->rx_bytes += tot_peer.tx_bytes;
		tot->rx_packets += tot_peer.tx_packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

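/* Map the running CPU onto one of the device's rx queues, so that XDP
 * transmissions from a given CPU consistently target the same ring.
 */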
static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}

static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, nxmit = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* The napi pointer is set if NAPI is enabled, which ensures that
	 * xdp_ring is initialized on receive side and the peer device is up.
	 */
	if (!rcu_access_pointer(rq->napi))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(xdp_get_frame_len(frame) > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
			break;
		nxmit++;
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = nxmit;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}

static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0, drops;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
	}

	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame(bq->q[i]);

	drops = bq->count - sent;
	trace_xdp_bulk_tx(rq->dev, sent, drops, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += drops;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}

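/* Queue an XDP_TX frame in the per-NAPI bulk queue; flush the queue first
 * when it is full, so the peer ring's producer lock is taken only once per
 * batch of up to VETH_XDP_TX_BULK_SIZE frames.
 */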
static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}

static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
					  struct xdp_frame *frame,
					  struct veth_xdp_tx_bq *bq,
					  struct veth_stats *stats)
{
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct veth_xdp_buff vxbuf;
		struct xdp_buff *xdp = &vxbuf.xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, xdp);
		xdp->rxq = &rq->xdp_rxq;
		vxbuf.skb = NULL;

		act = bpf_prog_run_xdp(xdp_prog, xdp);

		switch (act) {
		case XDP_PASS:
			if (xdp_update_frame_from_buff(xdp, frame))
				goto err_xdp;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp->rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp->rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return frame;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

/* frames array contains VETH_XDP_BATCH at most */
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
				  int n_xdpf, struct veth_xdp_tx_bq *bq,
				  struct veth_stats *stats)
{
	void *skbs[VETH_XDP_BATCH];
	int i;

	if (xdp_alloc_skb_bulk(skbs, n_xdpf,
			       GFP_ATOMIC | __GFP_ZERO) < 0) {
		for (i = 0; i < n_xdpf; i++)
			xdp_return_frame(frames[i]);
		stats->rx_drops += n_xdpf;

		return;
	}

	for (i = 0; i < n_xdpf; i++) {
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(frames[i], skb,
						 rq->dev);
		if (!skb) {
			xdp_return_frame(frames[i]);
			stats->rx_drops++;
			continue;
		}
		napi_gro_receive(&rq->xdp_napi, skb);
	}
}

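/* Take extra references on the head page and on every fragment, so the
 * underlying buffer outlives the original skb, which is consumed when the
 * frame takes the XDP_TX or XDP_REDIRECT path.
 */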
static void veth_xdp_get(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	int i;

	get_page(virt_to_page(xdp->data));
	if (likely(!xdp_buff_has_frags(xdp)))
		return;

	for (i = 0; i < sinfo->nr_frags; i++)
		__skb_frag_ref(&sinfo->frags[i]);
}

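/* Make an skb usable as an xdp_buff backing store: if it is shared, has a
 * locked head, carries fragments or lacks XDP_PACKET_HEADROOM, its data is
 * re-allocated from the rx queue's page pool first.
 */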
static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
					struct xdp_buff *xdp,
					struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	u32 frame_sz;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_shinfo(skb)->nr_frags ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM))
			goto drop;

		skb = *pskb;
	}

	/* SKB "head" area always has tailroom for skb_shared_info */
	frame_sz = skb_end_pointer(skb) - skb->head;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
			 skb_headlen(skb), true);

	if (skb_is_nonlinear(skb)) {
		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
		xdp_buff_set_frags_flag(xdp);
	} else {
		xdp_buff_clear_frags_flag(xdp);
	}
	*pskb = skb;

	return 0;
drop:
	consume_skb(skb);
	*pskb = NULL;

	return -ENOMEM;
}

static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	struct veth_xdp_buff vxbuf;
	struct xdp_buff *xdp = &vxbuf.xdp;
	u32 act, metalen;
	int off;

	skb_prepare_for_gro(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	__skb_push(skb, skb->data - skb_mac_header(skb));
	if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
		goto drop;
	vxbuf.skb = skb;

	orig_data = xdp->data;
	orig_data_end = xdp->data_end;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		veth_xdp_get(xdp);
		consume_skb(skb);
		xdp->rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		veth_xdp_get(xdp);
		consume_skb(skb);
		xdp->rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	off = orig_data - xdp->data;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);

	skb_reset_mac_header(skb);

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp->data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */

	/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
	 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
	 */
	if (xdp_buff_has_frags(xdp))
		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
	else
		skb->data_len = 0;

	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp->data - xdp->data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	xdp_return_buff(xdp);
xdp_xmit:
	return NULL;
}

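/* NAPI receive core: consume up to @budget entries from the xdp_ring,
 * running the attached program on each. Frames that pass are collected in
 * groups of VETH_XDP_BATCH so their skbs can be bulk-allocated.
 */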
static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0, n_xdpf = 0;
	void *xdpf[VETH_XDP_BATCH];

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			/* ndo_xdp_xmit */
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += xdp_get_frame_len(frame);
			frame = veth_xdp_rcv_one(rq, frame, bq, stats);
			if (frame) {
				/* XDP_PASS */
				xdpf[n_xdpf++] = frame;
				if (n_xdpf == VETH_XDP_BATCH) {
					veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
							      bq, stats);
					n_xdpf = 0;
				}
			}
		} else {
			/* ndo_start_xmit */
			struct sk_buff *skb = ptr;

			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
			if (skb) {
				if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
					netif_receive_skb(skb);
				else
					napi_gro_receive(&rq->xdp_napi, skb);
			}
		}
		done++;
	}

	if (n_xdpf)
		veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}

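/* NAPI poll callback. After completing, re-schedule itself if new entries
 * were produced in the ring while rx_notify_masked was being cleared.
 */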
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	struct veth_stats stats = {};
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &bq, &stats);

	if (stats.xdp_redirect > 0)
		xdp_do_flush();

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			if (napi_schedule_prep(&rq->xdp_napi)) {
				WRITE_ONCE(rq->rx_notify_masked, true);
				__napi_schedule(&rq->xdp_napi);
			}
		}
	}

	if (stats.xdp_tx > 0)
		veth_xdp_flush(rq, &bq);
	xdp_clear_return_frame_no_direct();

	return done;
}

static int veth_create_page_pool(struct veth_rq *rq)
{
	struct page_pool_params pp_params = {
		.order = 0,
		.pool_size = VETH_RING_SIZE,
		.nid = NUMA_NO_NODE,
		.dev = &rq->dev->dev,
	};

	rq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rq->page_pool)) {
		int err = PTR_ERR(rq->page_pool);

		rq->page_pool = NULL;
		return err;
	}

	return 0;
}

static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		err = veth_create_page_pool(&priv->rq[i]);
		if (err)
			goto err_page_pool;
	}

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_enable(&rq->xdp_napi);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;

err_xdp_ring:
	for (i--; i >= start; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
	i = end;
err_page_pool:
	for (i--; i >= start; i--) {
		page_pool_destroy(priv->rq[i].page_pool);
		priv->rq[i].page_pool = NULL;
	}

	return err;
}

static int __veth_napi_enable(struct net_device *dev)
{
	return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}

static void veth_napi_del_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rcu_assign_pointer(priv->rq[i].napi, NULL);
		napi_disable(&rq->xdp_napi);
		__netif_napi_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}

	for (i = start; i < end; i++) {
		page_pool_destroy(priv->rq[i].page_pool);
		priv->rq[i].page_pool = NULL;
	}
}

static void veth_napi_del(struct net_device *dev)
{
	veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
}

static bool veth_gro_requested(const struct net_device *dev)
{
	return !!(dev->wanted_features & NETIF_F_GRO);
}

static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
				 bool napi_already_on)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		if (!napi_already_on)
			netif_napi_add(dev, &rq->xdp_napi, veth_poll);
		err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
		if (err < 0)
			goto err_rxq_reg;

		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED,
						 NULL);
		if (err < 0)
			goto err_reg_mem;

		/* Save original mem info as it can be overwritten */
		rq->xdp_mem = rq->xdp_rxq.mem;
	}
	return 0;

err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= start; i--) {
		struct veth_rq *rq = &priv->rq[i];

		xdp_rxq_info_unreg(&rq->xdp_rxq);
		if (!napi_already_on)
			netif_napi_del(&rq->xdp_napi);
	}

	return err;
}

static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
				   bool delete_napi)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);

		if (delete_napi)
			netif_napi_del(&rq->xdp_napi);
	}
}

static int veth_enable_xdp(struct net_device *dev)
{
	bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
		if (err)
			return err;

		if (!napi_already_on) {
			err = __veth_napi_enable(dev);
			if (err) {
				veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
				return err;
			}
		}
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);

	if (!netif_running(dev) || !veth_gro_requested(dev))
		veth_napi_del(dev);

	veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}

static int veth_napi_enable_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll);
	}

	err = __veth_napi_enable_range(dev, start, end);
	if (err) {
		for (i = start; i < end; i++) {
			struct veth_rq *rq = &priv->rq[i];

			netif_napi_del(&rq->xdp_napi);
		}
		return err;
	}
	return err;
}

static int veth_napi_enable(struct net_device *dev)
{
	return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}

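/* Helpers for veth_set_channels(): bring up / tear down per-queue NAPI and
 * XDP state over the half-open queue range [start, end).
 */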
static void veth_disable_range_safe(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);

	if (start >= end)
		return;

	if (priv->_xdp_prog) {
		veth_napi_del_range(dev, start, end);
		veth_disable_xdp_range(dev, start, end, false);
	} else if (veth_gro_requested(dev)) {
		veth_napi_del_range(dev, start, end);
	}
}

static int veth_enable_range_safe(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err;

	if (start >= end)
		return 0;

	if (priv->_xdp_prog) {
		/* these channels are freshly initialized, napi is not on there
		 * even when GRO is requested
		 */
		err = veth_enable_xdp_range(dev, start, end, false);
		if (err)
			return err;

		err = __veth_napi_enable_range(dev, start, end);
		if (err) {
			/* on error always delete the newly added napis */
			veth_disable_xdp_range(dev, start, end, true);
			return err;
		}
	} else if (veth_gro_requested(dev)) {
		return veth_napi_enable_range(dev, start, end);
	}
	return 0;
}

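/* Advertise XDP capabilities. The device can act as an XDP target only
 * when it has at least as many rx queues as its peer has tx queues;
 * ndo_xdp_xmit further requires the peer to run NAPI (an attached XDP
 * program or GRO enabled there), since transmitted frames land in the
 * peer's rings.
 */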
static void veth_set_xdp_features(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
		struct veth_priv *priv_peer = netdev_priv(peer);
		xdp_features_t val = NETDEV_XDP_ACT_BASIC |
				     NETDEV_XDP_ACT_REDIRECT |
				     NETDEV_XDP_ACT_RX_SG;

		if (priv_peer->_xdp_prog || veth_gro_requested(peer))
			val |= NETDEV_XDP_ACT_NDO_XMIT |
			       NETDEV_XDP_ACT_NDO_XMIT_SG;
		xdp_set_features_flag(dev, val);
	} else {
		xdp_clear_features_flag(dev);
	}
}

static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch)
{
	struct veth_priv *priv = netdev_priv(dev);
	unsigned int old_rx_count, new_rx_count;
	struct veth_priv *peer_priv;
	struct net_device *peer;
	int err;

	/* sanity check. Upper bounds are already enforced by the caller */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	/* avoid breaking XDP, if that is enabled */
	peer = rtnl_dereference(priv->peer);
	peer_priv = peer ? netdev_priv(peer) : NULL;
	if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
		return -EINVAL;

	if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
		return -EINVAL;

	old_rx_count = dev->real_num_rx_queues;
	new_rx_count = ch->rx_count;
	if (netif_running(dev)) {
		/* turn device off */
		netif_carrier_off(dev);
		if (peer)
			netif_carrier_off(peer);

		/* try to allocate new resources, as needed */
		err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
		if (err)
			goto out;
	}

	err = netif_set_real_num_rx_queues(dev, ch->rx_count);
	if (err)
		goto revert;

	err = netif_set_real_num_tx_queues(dev, ch->tx_count);
	if (err) {
		int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);

		/* this error condition could happen only if rx and tx change
		 * in opposite directions (e.g. the tx count rises while the
		 * rx count decreases) and we can't do anything to fully
		 * restore the original status
		 */
		if (err2)
			pr_warn("Can't restore rx queues config %d -> %d %d",
				new_rx_count, old_rx_count, err2);
		else
			goto revert;
	}

out:
	if (netif_running(dev)) {
		/* note that we need to swap the arguments WRT the enable part
		 * to identify the range we have to disable
		 */
		veth_disable_range_safe(dev, new_rx_count, old_rx_count);
		netif_carrier_on(dev);
		if (peer)
			netif_carrier_on(peer);
	}

	/* update XDP supported features */
	veth_set_xdp_features(dev);
	if (peer)
		veth_set_xdp_features(peer);

	return err;

revert:
	new_rx_count = old_rx_count;
	old_rx_count = ch->rx_count;
	goto out;
}

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	} else if (veth_gro_requested(dev)) {
		err = veth_napi_enable(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	veth_set_xdp_features(dev);

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);
	else if (veth_gro_requested(dev))
		veth_napi_del(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
			    GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kvfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	netdev_lockdep_set_classes(dev);
	return veth_alloc_queues(dev);
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one.
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it, so there is
	 * nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 */
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? READ_ONCE(peer->ifindex) : 0;
	rcu_read_unlock();

	return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}

	return features;
}

static int veth_set_features(struct net_device *dev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int err;

	if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
		return 0;

	peer = rtnl_dereference(priv->peer);
	if (features & NETIF_F_GRO) {
		err = veth_napi_enable(dev);
		if (err)
			return err;

		if (peer)
			xdp_features_set_redirect_target(peer, true);
	} else {
		if (peer)
			xdp_features_clear_redirect_target(peer);
		veth_napi_del(dev);
	}
	return 0;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}

static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;
	struct net_device *peer;
	unsigned int max_mtu;
	int err;

	old_prog = priv->_xdp_prog;
	priv->_xdp_prog = prog;
	peer = rtnl_dereference(priv->peer);

	if (prog) {
		if (!peer) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
			err = -ENOTCONN;
			goto err;
		}

		max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
			  peer->hard_header_len;
		/* Allow increasing the max_mtu if the program supports
		 * XDP fragments.
		 */
		if (prog->aux->xdp_has_frags)
			max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;

		if (peer->mtu > max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
			err = -ERANGE;
			goto err;
		}

		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
			err = -ENOSPC;
			goto err;
		}

		if (dev->flags & IFF_UP) {
			err = veth_enable_xdp(dev);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
				goto err;
			}
		}

		if (!old_prog) {
			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
			peer->max_mtu = max_mtu;
		}

		xdp_features_set_redirect_target(peer, true);
	}

	if (old_prog) {
		if (!prog) {
			if (peer && !veth_gro_requested(dev))
				xdp_features_clear_redirect_target(peer);

			if (dev->flags & IFF_UP)
				veth_disable_xdp(dev);

			if (peer) {
				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
				peer->max_mtu = ETH_MAX_MTU;
			}
		}
		bpf_prog_put(old_prog);
	}

	if ((!!old_prog ^ !!prog) && peer)
		netdev_update_features(peer);

	return 0;
err:
	priv->_xdp_prog = old_prog;

	return err;
}

static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return veth_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	struct veth_xdp_buff *_ctx = (void *)ctx;

	if (!_ctx->skb)
		return -ENODATA;

	*timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
	return 0;
}

static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
			    enum xdp_rss_hash_type *rss_type)
{
	struct veth_xdp_buff *_ctx = (void *)ctx;
	struct sk_buff *skb = _ctx->skb;

	if (!skb)
		return -ENODATA;

	*hash = skb_get_hash(skb);
	*rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE;

	return 0;
}

static int veth_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
				u16 *vlan_tci)
{
	const struct veth_xdp_buff *_ctx = (void *)ctx;
	const struct sk_buff *skb = _ctx->skb;
	int err;

	if (!skb)
		return -ENODATA;

	err = __vlan_hwaccel_get_tag(skb, vlan_tci);
	if (err)
		return err;

	*vlan_proto = skb->vlan_proto;
	return err;
}

static const struct net_device_ops veth_netdev_ops = {
	.ndo_init		= veth_dev_init,
	.ndo_open		= veth_open,
	.ndo_stop		= veth_close,
	.ndo_start_xmit		= veth_xmit,
	.ndo_get_stats64	= veth_get_stats64,
	.ndo_set_rx_mode	= veth_set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_fix_features	= veth_fix_features,
	.ndo_set_features	= veth_set_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
	.ndo_bpf		= veth_xdp,
	.ndo_xdp_xmit		= veth_ndo_xdp_xmit,
	.ndo_get_peer_dev	= veth_peer_dev,
};

static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
	.xmo_rx_timestamp	= veth_xdp_rx_timestamp,
	.xmo_rx_hash		= veth_xdp_rx_hash,
	.xmo_rx_vlan_tag	= veth_xdp_rx_vlan_tag,
};

#define VETH_FEATURES	(NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
			 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
			 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

static void veth_disable_gro(struct net_device *dev)
{
	dev->features &= ~NETIF_F_GRO;
	dev->wanted_features &= ~NETIF_F_GRO;
	netdev_update_features(dev);
}

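/* Unless the number of queues was explicitly requested via netlink, start
 * with a single active queue pair; more can be enabled later through
 * 'ethtool -L'.
 */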
static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
{
	int err;

	if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
		err = netif_set_real_num_tx_queues(dev, 1);
		if (err)
			return err;
	}
	if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
		err = netif_set_real_num_rx_queues(dev, 1);
		if (err)
			return err;
	}
	return 0;
}

static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	netif_inherit_tso_max(peer, dev);

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	/* keep GRO disabled by default to be consistent with the established
	 * veth behavior
	 */
	veth_disable_gro(peer);
	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp, 0, NULL);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note that, since we've registered a new device, the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);
	err = veth_init_queues(dev, tb);
	if (err)
		goto err_queues;

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);
	err = veth_init_queues(peer, tb);
	if (err)
		goto err_queues;

	veth_disable_gro(dev);
	/* update XDP supported features */
	veth_set_xdp_features(dev);
	veth_set_xdp_features(peer);

	return 0;

err_queues:
	unregister_netdevice(dev);
err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static unsigned int veth_get_num_queues(void)
{
	/* enforce the same queue limit as rtnl_create_link */
	int queues = num_possible_cpus();

	if (queues > 4096)
		queues = 4096;
	return queues;
}

static struct rtnl_link_ops veth_link_ops = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct veth_priv),
	.setup			= veth_setup,
	.validate		= veth_validate,
	.newlink		= veth_newlink,
	.dellink		= veth_dellink,
	.policy			= veth_policy,
	.maxtype		= VETH_INFO_MAX,
	.get_link_net		= veth_get_link_net,
	.get_num_tx_queues	= veth_get_num_queues,
	.get_num_rx_queues	= veth_get_num_queues,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);