1 | /* |
2 | * vrf.c: device driver to encapsulate a VRF space |
3 | * |
4 | * Copyright (c) 2015 Cumulus Networks. All rights reserved. |
5 | * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com> |
6 | * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com> |
7 | * |
8 | * Based on dummy, team and ipvlan drivers |
9 | * |
10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by |
12 | * the Free Software Foundation; either version 2 of the License, or |
13 | * (at your option) any later version. |
14 | */ |
15 | |
16 | #include <linux/module.h> |
17 | #include <linux/kernel.h> |
18 | #include <linux/netdevice.h> |
19 | #include <linux/etherdevice.h> |
20 | #include <linux/ip.h> |
21 | #include <linux/init.h> |
22 | #include <linux/moduleparam.h> |
23 | #include <linux/netfilter.h> |
24 | #include <linux/rtnetlink.h> |
25 | #include <net/rtnetlink.h> |
26 | #include <linux/u64_stats_sync.h> |
27 | #include <linux/hashtable.h> |
28 | |
29 | #include <linux/inetdevice.h> |
30 | #include <net/arp.h> |
31 | #include <net/ip.h> |
32 | #include <net/ip_fib.h> |
33 | #include <net/ip6_fib.h> |
34 | #include <net/ip6_route.h> |
35 | #include <net/route.h> |
36 | #include <net/addrconf.h> |
37 | #include <net/l3mdev.h> |
38 | #include <net/fib_rules.h> |
39 | #include <net/netns/generic.h> |
40 | |
41 | #define DRV_NAME "vrf" |
42 | #define DRV_VERSION "1.0" |
43 | |
44 | #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ |
45 | |
46 | static unsigned int vrf_net_id; |
47 | |
48 | struct net_vrf { |
49 | struct rtable __rcu *rth; |
50 | struct rt6_info __rcu *rt6; |
51 | #if IS_ENABLED(CONFIG_IPV6) |
52 | struct fib6_table *fib6_table; |
53 | #endif |
54 | u32 tb_id; |
55 | }; |
56 | |
57 | struct pcpu_dstats { |
58 | u64 tx_pkts; |
59 | u64 tx_bytes; |
60 | u64 tx_drps; |
61 | u64 rx_pkts; |
62 | u64 rx_bytes; |
63 | u64 rx_drps; |
64 | struct u64_stats_sync syncp; |
65 | }; |
66 | |
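/* per-CPU Rx accounting for packets delivered through the VRF device */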
67 | static void vrf_rx_stats(struct net_device *dev, int len) |
68 | { |
69 | struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); |
70 | |
71 | u64_stats_update_begin(&dstats->syncp); |
72 | dstats->rx_pkts++; |
73 | dstats->rx_bytes += len; |
74 | u64_stats_update_end(&dstats->syncp); |
75 | } |
76 | |
77 | static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) |
78 | { |
79 | vrf_dev->stats.tx_errors++; |
80 | kfree_skb(skb); |
81 | } |
82 | |
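/* Fold the per-CPU dstats counters into an rtnl_link_stats64. The
 * u64_stats_fetch_begin/retry pair rereads a CPU's counters if an
 * update raced with the snapshot.
 */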
83 | static void vrf_get_stats64(struct net_device *dev, |
84 | struct rtnl_link_stats64 *stats) |
85 | { |
86 | int i; |
87 | |
88 | for_each_possible_cpu(i) { |
89 | const struct pcpu_dstats *dstats; |
90 | u64 tbytes, tpkts, tdrops, rbytes, rpkts; |
91 | unsigned int start; |
92 | |
93 | dstats = per_cpu_ptr(dev->dstats, i); |
94 | do { |
95 | start = u64_stats_fetch_begin_irq(&dstats->syncp); |
96 | tbytes = dstats->tx_bytes; |
97 | tpkts = dstats->tx_pkts; |
98 | tdrops = dstats->tx_drps; |
99 | rbytes = dstats->rx_bytes; |
100 | rpkts = dstats->rx_pkts; |
101 | } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); |
102 | stats->tx_bytes += tbytes; |
103 | stats->tx_packets += tpkts; |
104 | stats->tx_dropped += tdrops; |
105 | stats->rx_bytes += rbytes; |
106 | stats->rx_packets += rpkts; |
107 | } |
108 | } |
109 | |
110 | /* by default VRF devices do not have a qdisc and are expected |
111 | * to be created with only a single queue. |
112 | */ |
113 | static bool qdisc_tx_is_default(const struct net_device *dev) |
114 | { |
115 | struct netdev_queue *txq; |
116 | struct Qdisc *qdisc; |
117 | |
118 | if (dev->num_tx_queues > 1) |
119 | return false; |
120 | |
121 | txq = netdev_get_tx_queue(dev, 0); |
122 | qdisc = rcu_access_pointer(txq->qdisc); |
123 | |
124 | return !qdisc->enqueue; |
125 | } |
126 | |
/* Local traffic destined to a local address. Reinsert the packet into
 * the Rx path, similar to loopback handling.
 */
130 | static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, |
131 | struct dst_entry *dst) |
132 | { |
133 | int len = skb->len; |
134 | |
135 | skb_orphan(skb); |
136 | |
137 | skb_dst_set(skb, dst); |
138 | |
139 | /* set pkt_type to avoid skb hitting packet taps twice - |
140 | * once on Tx and again in Rx processing |
141 | */ |
142 | skb->pkt_type = PACKET_LOOPBACK; |
143 | |
144 | skb->protocol = eth_type_trans(skb, dev); |
145 | |
146 | if (likely(netif_rx(skb) == NET_RX_SUCCESS)) |
147 | vrf_rx_stats(dev, len); |
148 | else |
149 | this_cpu_inc(dev->dstats->rx_drps); |
150 | |
151 | return NETDEV_TX_OK; |
152 | } |
153 | |
154 | #if IS_ENABLED(CONFIG_IPV6) |
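/* IPv6 counterpart of vrf_ip_local_out() below: run the LOCAL_OUT
 * netfilter hook, then hand the skb to dst_output() unless the hook
 * consumed it.
 */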
155 | static int vrf_ip6_local_out(struct net *net, struct sock *sk, |
156 | struct sk_buff *skb) |
157 | { |
158 | int err; |
159 | |
160 | err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, |
161 | sk, skb, NULL, skb_dst(skb)->dev, dst_output); |
162 | |
163 | if (likely(err == 1)) |
164 | err = dst_output(net, sk, skb); |
165 | |
166 | return err; |
167 | } |
168 | |
169 | static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, |
170 | struct net_device *dev) |
171 | { |
172 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
173 | struct net *net = dev_net(skb->dev); |
174 | struct flowi6 fl6 = { |
175 | /* needed to match OIF rule */ |
176 | .flowi6_oif = dev->ifindex, |
177 | .flowi6_iif = LOOPBACK_IFINDEX, |
178 | .daddr = iph->daddr, |
179 | .saddr = iph->saddr, |
180 | .flowlabel = ip6_flowinfo(iph), |
181 | .flowi6_mark = skb->mark, |
182 | .flowi6_proto = iph->nexthdr, |
183 | .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF, |
184 | }; |
185 | int ret = NET_XMIT_DROP; |
186 | struct dst_entry *dst; |
187 | struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst; |
188 | |
189 | dst = ip6_route_output(net, NULL, &fl6); |
190 | if (dst == dst_null) |
191 | goto err; |
192 | |
193 | skb_dst_drop(skb); |
194 | |
	/* if dst->dev is the loopback or the VRF device again, this is
	 * locally originated traffic destined to a local address. Short
	 * circuit to the Rx path.
	 */
199 | if (dst->dev == dev) |
200 | return vrf_local_xmit(skb, dev, dst); |
201 | |
202 | skb_dst_set(skb, dst); |
203 | |
	/* strip the ethernet header added for the pass-through VRF device */
205 | __skb_pull(skb, skb_network_offset(skb)); |
206 | |
207 | ret = vrf_ip6_local_out(net, skb->sk, skb); |
208 | if (unlikely(net_xmit_eval(ret))) |
209 | dev->stats.tx_errors++; |
210 | else |
211 | ret = NET_XMIT_SUCCESS; |
212 | |
213 | return ret; |
214 | err: |
215 | vrf_tx_error(dev, skb); |
216 | return NET_XMIT_DROP; |
217 | } |
218 | #else |
219 | static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, |
220 | struct net_device *dev) |
221 | { |
222 | vrf_tx_error(dev, skb); |
223 | return NET_XMIT_DROP; |
224 | } |
225 | #endif |
226 | |
/* based on ip_local_out; can't use it because the dst is switched to point back at us */
228 | static int vrf_ip_local_out(struct net *net, struct sock *sk, |
229 | struct sk_buff *skb) |
230 | { |
231 | int err; |
232 | |
233 | err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, |
234 | skb, NULL, skb_dst(skb)->dev, dst_output); |
235 | if (likely(err == 1)) |
236 | err = dst_output(net, sk, skb); |
237 | |
238 | return err; |
239 | } |
240 | |
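/* Locally generated IPv4 traffic sent out the VRF device: route the
 * packet in the VRF's table (the oif match in the l3mdev FIB rule does
 * the steering), short circuit local destinations back to the Rx path,
 * otherwise continue via the LOCAL_OUT hook and dst_output().
 */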
241 | static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, |
242 | struct net_device *vrf_dev) |
243 | { |
244 | struct iphdr *ip4h = ip_hdr(skb); |
245 | int ret = NET_XMIT_DROP; |
246 | struct flowi4 fl4 = { |
247 | /* needed to match OIF rule */ |
248 | .flowi4_oif = vrf_dev->ifindex, |
249 | .flowi4_iif = LOOPBACK_IFINDEX, |
250 | .flowi4_tos = RT_TOS(ip4h->tos), |
251 | .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF, |
252 | .flowi4_proto = ip4h->protocol, |
253 | .daddr = ip4h->daddr, |
254 | .saddr = ip4h->saddr, |
255 | }; |
256 | struct net *net = dev_net(vrf_dev); |
257 | struct rtable *rt; |
258 | |
259 | rt = ip_route_output_flow(net, &fl4, NULL); |
260 | if (IS_ERR(rt)) |
261 | goto err; |
262 | |
263 | skb_dst_drop(skb); |
264 | |
	/* if dst->dev is the loopback or the VRF device again, this is
	 * locally originated traffic destined to a local address. Short
	 * circuit to the Rx path.
	 */
269 | if (rt->dst.dev == vrf_dev) |
270 | return vrf_local_xmit(skb, vrf_dev, &rt->dst); |
271 | |
272 | skb_dst_set(skb, &rt->dst); |
273 | |
	/* strip the ethernet header added for the pass-through VRF device */
275 | __skb_pull(skb, skb_network_offset(skb)); |
276 | |
277 | if (!ip4h->saddr) { |
278 | ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0, |
279 | RT_SCOPE_LINK); |
280 | } |
281 | |
282 | ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); |
283 | if (unlikely(net_xmit_eval(ret))) |
284 | vrf_dev->stats.tx_errors++; |
285 | else |
286 | ret = NET_XMIT_SUCCESS; |
287 | |
288 | out: |
289 | return ret; |
290 | err: |
291 | vrf_tx_error(vrf_dev, skb); |
292 | goto out; |
293 | } |
294 | |
295 | static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) |
296 | { |
297 | switch (skb->protocol) { |
298 | case htons(ETH_P_IP): |
299 | return vrf_process_v4_outbound(skb, dev); |
300 | case htons(ETH_P_IPV6): |
301 | return vrf_process_v6_outbound(skb, dev); |
302 | default: |
303 | vrf_tx_error(dev, skb); |
304 | return NET_XMIT_DROP; |
305 | } |
306 | } |
307 | |
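/* ndo_start_xmit handler: hand the frame to the per-family outbound
 * path and account the result in the per-CPU dstats.
 */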
308 | static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) |
309 | { |
310 | int len = skb->len; |
311 | netdev_tx_t ret = is_ip_tx_frame(skb, dev); |
312 | |
313 | if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { |
314 | struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); |
315 | |
316 | u64_stats_update_begin(&dstats->syncp); |
317 | dstats->tx_pkts++; |
318 | dstats->tx_bytes += len; |
319 | u64_stats_update_end(&dstats->syncp); |
320 | } else { |
321 | this_cpu_inc(dev->dstats->tx_drps); |
322 | } |
323 | |
324 | return ret; |
325 | } |
326 | |
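/* If packet taps (e.g., af_packet sockets) are attached to the VRF
 * device, push a dummy Ethernet header so they see a well-formed frame,
 * deliver a copy to the taps, then pull the header back off.
 */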
327 | static int vrf_finish_direct(struct net *net, struct sock *sk, |
328 | struct sk_buff *skb) |
329 | { |
330 | struct net_device *vrf_dev = skb->dev; |
331 | |
332 | if (!list_empty(&vrf_dev->ptype_all) && |
333 | likely(skb_headroom(skb) >= ETH_HLEN)) { |
334 | struct ethhdr *eth = skb_push(skb, ETH_HLEN); |
335 | |
336 | ether_addr_copy(eth->h_source, vrf_dev->dev_addr); |
337 | eth_zero_addr(eth->h_dest); |
338 | eth->h_proto = skb->protocol; |
339 | |
340 | rcu_read_lock_bh(); |
341 | dev_queue_xmit_nit(skb, vrf_dev); |
342 | rcu_read_unlock_bh(); |
343 | |
344 | skb_pull(skb, ETH_HLEN); |
345 | } |
346 | |
347 | return 1; |
348 | } |
349 | |
350 | #if IS_ENABLED(CONFIG_IPV6) |
351 | /* modelled after ip6_finish_output2 */ |
352 | static int vrf_finish_output6(struct net *net, struct sock *sk, |
353 | struct sk_buff *skb) |
354 | { |
355 | struct dst_entry *dst = skb_dst(skb); |
356 | struct net_device *dev = dst->dev; |
357 | struct neighbour *neigh; |
358 | struct in6_addr *nexthop; |
359 | int ret; |
360 | |
361 | nf_reset(skb); |
362 | |
363 | skb->protocol = htons(ETH_P_IPV6); |
364 | skb->dev = dev; |
365 | |
366 | rcu_read_lock_bh(); |
367 | nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); |
368 | neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); |
369 | if (unlikely(!neigh)) |
370 | neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); |
371 | if (!IS_ERR(neigh)) { |
372 | sock_confirm_neigh(skb, neigh); |
373 | ret = neigh_output(neigh, skb); |
374 | rcu_read_unlock_bh(); |
375 | return ret; |
376 | } |
377 | rcu_read_unlock_bh(); |
378 | |
379 | IP6_INC_STATS(dev_net(dst->dev), |
380 | ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); |
381 | kfree_skb(skb); |
382 | return -EINVAL; |
383 | } |
384 | |
385 | /* modelled after ip6_output */ |
386 | static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb) |
387 | { |
388 | return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, |
389 | net, sk, skb, NULL, skb_dst(skb)->dev, |
390 | vrf_finish_output6, |
391 | !(IP6CB(skb)->flags & IP6SKB_REROUTED)); |
392 | } |
393 | |
/* Set the dst on the skb so the packet is sent to us via the dev_xmit
 * path. Allows the packet to go through device-based features such as
 * qdisc, netfilter hooks and packet sockets with skb->dev set to the
 * vrf device.
 */
398 | static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev, |
399 | struct sk_buff *skb) |
400 | { |
401 | struct net_vrf *vrf = netdev_priv(vrf_dev); |
402 | struct dst_entry *dst = NULL; |
403 | struct rt6_info *rt6; |
404 | |
405 | rcu_read_lock(); |
406 | |
407 | rt6 = rcu_dereference(vrf->rt6); |
408 | if (likely(rt6)) { |
409 | dst = &rt6->dst; |
410 | dst_hold(dst); |
411 | } |
412 | |
413 | rcu_read_unlock(); |
414 | |
415 | if (unlikely(!dst)) { |
416 | vrf_tx_error(vrf_dev, skb); |
417 | return NULL; |
418 | } |
419 | |
420 | skb_dst_drop(skb); |
421 | skb_dst_set(skb, dst); |
422 | |
423 | return skb; |
424 | } |
425 | |
426 | static int vrf_output6_direct(struct net *net, struct sock *sk, |
427 | struct sk_buff *skb) |
428 | { |
429 | skb->protocol = htons(ETH_P_IPV6); |
430 | |
431 | return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, |
432 | net, sk, skb, NULL, skb->dev, |
433 | vrf_finish_direct, |
434 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
435 | } |
436 | |
437 | static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, |
438 | struct sock *sk, |
439 | struct sk_buff *skb) |
440 | { |
441 | struct net *net = dev_net(vrf_dev); |
442 | int err; |
443 | |
444 | skb->dev = vrf_dev; |
445 | |
446 | err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, |
447 | skb, NULL, vrf_dev, vrf_output6_direct); |
448 | |
449 | if (likely(err == 1)) |
450 | err = vrf_output6_direct(net, sk, skb); |
451 | |
452 | /* reset skb device */ |
453 | if (likely(err == 1)) |
454 | nf_reset(skb); |
455 | else |
456 | skb = NULL; |
457 | |
458 | return skb; |
459 | } |
460 | |
461 | static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, |
462 | struct sock *sk, |
463 | struct sk_buff *skb) |
464 | { |
465 | /* don't divert link scope packets */ |
466 | if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) |
467 | return skb; |
468 | |
469 | if (qdisc_tx_is_default(vrf_dev)) |
470 | return vrf_ip6_out_direct(vrf_dev, sk, skb); |
471 | |
472 | return vrf_ip6_out_redirect(vrf_dev, skb); |
473 | } |
474 | |
475 | /* holding rtnl */ |
476 | static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) |
477 | { |
478 | struct rt6_info *rt6 = rtnl_dereference(vrf->rt6); |
479 | struct net *net = dev_net(dev); |
480 | struct dst_entry *dst; |
481 | |
482 | RCU_INIT_POINTER(vrf->rt6, NULL); |
483 | synchronize_rcu(); |
484 | |
	/* move the dev in the dst to loopback so this VRF device can be
	 * deleted - based on dst_ifdown
	 */
488 | if (rt6) { |
489 | dst = &rt6->dst; |
490 | dev_put(dst->dev); |
491 | dst->dev = net->loopback_dev; |
492 | dev_hold(dst->dev); |
493 | dst_release(dst); |
494 | } |
495 | } |
496 | |
497 | static int vrf_rt6_create(struct net_device *dev) |
498 | { |
499 | int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM; |
500 | struct net_vrf *vrf = netdev_priv(dev); |
501 | struct net *net = dev_net(dev); |
502 | struct rt6_info *rt6; |
503 | int rc = -ENOMEM; |
504 | |
	/* IPv6 can be enabled at build time (CONFIG) yet disabled at runtime */
506 | if (!ipv6_mod_enabled()) |
507 | return 0; |
508 | |
509 | vrf->fib6_table = fib6_new_table(net, vrf->tb_id); |
510 | if (!vrf->fib6_table) |
511 | goto out; |
512 | |
513 | /* create a dst for routing packets out a VRF device */ |
514 | rt6 = ip6_dst_alloc(net, dev, flags); |
515 | if (!rt6) |
516 | goto out; |
517 | |
518 | rt6->dst.output = vrf_output6; |
519 | |
520 | rcu_assign_pointer(vrf->rt6, rt6); |
521 | |
522 | rc = 0; |
523 | out: |
524 | return rc; |
525 | } |
526 | #else |
527 | static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, |
528 | struct sock *sk, |
529 | struct sk_buff *skb) |
530 | { |
531 | return skb; |
532 | } |
533 | |
534 | static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) |
535 | { |
536 | } |
537 | |
538 | static int vrf_rt6_create(struct net_device *dev) |
539 | { |
540 | return 0; |
541 | } |
542 | #endif |
543 | |
544 | /* modelled after ip_finish_output2 */ |
545 | static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) |
546 | { |
547 | struct dst_entry *dst = skb_dst(skb); |
548 | struct rtable *rt = (struct rtable *)dst; |
549 | struct net_device *dev = dst->dev; |
550 | unsigned int hh_len = LL_RESERVED_SPACE(dev); |
551 | struct neighbour *neigh; |
552 | u32 nexthop; |
553 | int ret = -EINVAL; |
554 | |
555 | nf_reset(skb); |
556 | |
557 | /* Be paranoid, rather than too clever. */ |
558 | if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { |
559 | struct sk_buff *skb2; |
560 | |
561 | skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); |
562 | if (!skb2) { |
563 | ret = -ENOMEM; |
564 | goto err; |
565 | } |
566 | if (skb->sk) |
567 | skb_set_owner_w(skb2, skb->sk); |
568 | |
569 | consume_skb(skb); |
570 | skb = skb2; |
571 | } |
572 | |
573 | rcu_read_lock_bh(); |
574 | |
575 | nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr); |
576 | neigh = __ipv4_neigh_lookup_noref(dev, nexthop); |
577 | if (unlikely(!neigh)) |
578 | neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); |
579 | if (!IS_ERR(neigh)) { |
580 | sock_confirm_neigh(skb, neigh); |
581 | ret = neigh_output(neigh, skb); |
582 | rcu_read_unlock_bh(); |
583 | return ret; |
584 | } |
585 | |
586 | rcu_read_unlock_bh(); |
587 | err: |
588 | vrf_tx_error(skb->dev, skb); |
589 | return ret; |
590 | } |
591 | |
592 | static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb) |
593 | { |
594 | struct net_device *dev = skb_dst(skb)->dev; |
595 | |
596 | IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); |
597 | |
598 | skb->dev = dev; |
599 | skb->protocol = htons(ETH_P_IP); |
600 | |
601 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, |
602 | net, sk, skb, NULL, dev, |
603 | vrf_finish_output, |
604 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
605 | } |
606 | |
/* Set the dst on the skb so the packet is sent to us via the dev_xmit
 * path. Allows the packet to go through device-based features such as
 * qdisc, netfilter hooks and packet sockets with skb->dev set to the
 * vrf device.
 */
611 | static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev, |
612 | struct sk_buff *skb) |
613 | { |
614 | struct net_vrf *vrf = netdev_priv(vrf_dev); |
615 | struct dst_entry *dst = NULL; |
616 | struct rtable *rth; |
617 | |
618 | rcu_read_lock(); |
619 | |
620 | rth = rcu_dereference(vrf->rth); |
621 | if (likely(rth)) { |
622 | dst = &rth->dst; |
623 | dst_hold(dst); |
624 | } |
625 | |
626 | rcu_read_unlock(); |
627 | |
628 | if (unlikely(!dst)) { |
629 | vrf_tx_error(vrf_dev, skb); |
630 | return NULL; |
631 | } |
632 | |
633 | skb_dst_drop(skb); |
634 | skb_dst_set(skb, dst); |
635 | |
636 | return skb; |
637 | } |
638 | |
639 | static int vrf_output_direct(struct net *net, struct sock *sk, |
640 | struct sk_buff *skb) |
641 | { |
642 | skb->protocol = htons(ETH_P_IP); |
643 | |
644 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, |
645 | net, sk, skb, NULL, skb->dev, |
646 | vrf_finish_direct, |
647 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
648 | } |
649 | |
650 | static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, |
651 | struct sock *sk, |
652 | struct sk_buff *skb) |
653 | { |
654 | struct net *net = dev_net(vrf_dev); |
655 | int err; |
656 | |
657 | skb->dev = vrf_dev; |
658 | |
659 | err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, |
660 | skb, NULL, vrf_dev, vrf_output_direct); |
661 | |
662 | if (likely(err == 1)) |
663 | err = vrf_output_direct(net, sk, skb); |
664 | |
665 | /* reset skb device */ |
666 | if (likely(err == 1)) |
667 | nf_reset(skb); |
668 | else |
669 | skb = NULL; |
670 | |
671 | return skb; |
672 | } |
673 | |
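/* IPv4 l3mdev output hook: divert locally generated unicast traffic
 * through the VRF device, either directly (no qdisc) or by pointing
 * skb_dst at the VRF's dst so the packet goes through dev_queue_xmit().
 */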
674 | static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, |
675 | struct sock *sk, |
676 | struct sk_buff *skb) |
677 | { |
678 | /* don't divert multicast or local broadcast */ |
679 | if (ipv4_is_multicast(ip_hdr(skb)->daddr) || |
680 | ipv4_is_lbcast(ip_hdr(skb)->daddr)) |
681 | return skb; |
682 | |
683 | if (qdisc_tx_is_default(vrf_dev)) |
684 | return vrf_ip_out_direct(vrf_dev, sk, skb); |
685 | |
686 | return vrf_ip_out_redirect(vrf_dev, skb); |
687 | } |
688 | |
689 | /* called with rcu lock held */ |
690 | static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev, |
691 | struct sock *sk, |
692 | struct sk_buff *skb, |
693 | u16 proto) |
694 | { |
695 | switch (proto) { |
696 | case AF_INET: |
697 | return vrf_ip_out(vrf_dev, sk, skb); |
698 | case AF_INET6: |
699 | return vrf_ip6_out(vrf_dev, sk, skb); |
700 | } |
701 | |
702 | return skb; |
703 | } |
704 | |
705 | /* holding rtnl */ |
706 | static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) |
707 | { |
708 | struct rtable *rth = rtnl_dereference(vrf->rth); |
709 | struct net *net = dev_net(dev); |
710 | struct dst_entry *dst; |
711 | |
712 | RCU_INIT_POINTER(vrf->rth, NULL); |
713 | synchronize_rcu(); |
714 | |
	/* move the dev in the dst to loopback so this VRF device can be
	 * deleted - based on dst_ifdown
	 */
718 | if (rth) { |
719 | dst = &rth->dst; |
720 | dev_put(dst->dev); |
721 | dst->dev = net->loopback_dev; |
722 | dev_hold(dst->dev); |
723 | dst_release(dst); |
724 | } |
725 | } |
726 | |
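/* IPv4 counterpart of vrf_rt6_create(): ensure the VRF's FIB table
 * exists and allocate the dst used to route packets out the device.
 */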
727 | static int vrf_rtable_create(struct net_device *dev) |
728 | { |
729 | struct net_vrf *vrf = netdev_priv(dev); |
730 | struct rtable *rth; |
731 | |
732 | if (!fib_new_table(dev_net(dev), vrf->tb_id)) |
733 | return -ENOMEM; |
734 | |
735 | /* create a dst for routing packets out through a VRF device */ |
736 | rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0); |
737 | if (!rth) |
738 | return -ENOMEM; |
739 | |
740 | rth->dst.output = vrf_output; |
741 | |
742 | rcu_assign_pointer(vrf->rth, rth); |
743 | |
744 | return 0; |
745 | } |
746 | |
747 | /**************************** device handling ********************/ |
748 | |
749 | /* cycle interface to flush neighbor cache and move routes across tables */ |
750 | static void cycle_netdev(struct net_device *dev, |
751 | struct netlink_ext_ack *extack) |
752 | { |
753 | unsigned int flags = dev->flags; |
754 | int ret; |
755 | |
756 | if (!netif_running(dev)) |
757 | return; |
758 | |
759 | ret = dev_change_flags(dev, flags & ~IFF_UP, extack); |
760 | if (ret >= 0) |
761 | ret = dev_change_flags(dev, flags, extack); |
762 | |
763 | if (ret < 0) { |
764 | netdev_err(dev, |
765 | "Failed to cycle device %s; route tables might be wrong!\n" , |
766 | dev->name); |
767 | } |
768 | } |
769 | |
770 | static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev, |
771 | struct netlink_ext_ack *extack) |
772 | { |
773 | int ret; |
774 | |
775 | /* do not allow loopback device to be enslaved to a VRF. |
776 | * The vrf device acts as the loopback for the vrf. |
777 | */ |
778 | if (port_dev == dev_net(dev)->loopback_dev) { |
779 | NL_SET_ERR_MSG(extack, |
780 | "Can not enslave loopback device to a VRF" ); |
781 | return -EOPNOTSUPP; |
782 | } |
783 | |
784 | port_dev->priv_flags |= IFF_L3MDEV_SLAVE; |
785 | ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack); |
786 | if (ret < 0) |
787 | goto err; |
788 | |
789 | cycle_netdev(port_dev, extack); |
790 | |
791 | return 0; |
792 | |
793 | err: |
794 | port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; |
795 | return ret; |
796 | } |
797 | |
798 | static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev, |
799 | struct netlink_ext_ack *extack) |
800 | { |
801 | if (netif_is_l3_master(port_dev)) { |
802 | NL_SET_ERR_MSG(extack, |
803 | "Can not enslave an L3 master device to a VRF" ); |
804 | return -EINVAL; |
805 | } |
806 | |
807 | if (netif_is_l3_slave(port_dev)) |
808 | return -EINVAL; |
809 | |
810 | return do_vrf_add_slave(dev, port_dev, extack); |
811 | } |
812 | |
813 | /* inverse of do_vrf_add_slave */ |
814 | static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) |
815 | { |
816 | netdev_upper_dev_unlink(port_dev, dev); |
817 | port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; |
818 | |
819 | cycle_netdev(port_dev, NULL); |
820 | |
821 | return 0; |
822 | } |
823 | |
824 | static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) |
825 | { |
826 | return do_vrf_del_slave(dev, port_dev); |
827 | } |
828 | |
829 | static void vrf_dev_uninit(struct net_device *dev) |
830 | { |
831 | struct net_vrf *vrf = netdev_priv(dev); |
832 | |
833 | vrf_rtable_release(dev, vrf); |
834 | vrf_rt6_release(dev, vrf); |
835 | |
836 | free_percpu(dev->dstats); |
837 | dev->dstats = NULL; |
838 | } |
839 | |
840 | static int vrf_dev_init(struct net_device *dev) |
841 | { |
842 | struct net_vrf *vrf = netdev_priv(dev); |
843 | |
844 | dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); |
845 | if (!dev->dstats) |
846 | goto out_nomem; |
847 | |
848 | /* create the default dst which points back to us */ |
849 | if (vrf_rtable_create(dev) != 0) |
850 | goto out_stats; |
851 | |
852 | if (vrf_rt6_create(dev) != 0) |
853 | goto out_rth; |
854 | |
855 | dev->flags = IFF_MASTER | IFF_NOARP; |
856 | |
857 | /* MTU is irrelevant for VRF device; set to 64k similar to lo */ |
858 | dev->mtu = 64 * 1024; |
859 | |
860 | /* similarly, oper state is irrelevant; set to up to avoid confusion */ |
861 | dev->operstate = IF_OPER_UP; |
862 | netdev_lockdep_set_classes(dev); |
863 | return 0; |
864 | |
865 | out_rth: |
866 | vrf_rtable_release(dev, vrf); |
867 | out_stats: |
868 | free_percpu(dev->dstats); |
869 | dev->dstats = NULL; |
870 | out_nomem: |
871 | return -ENOMEM; |
872 | } |
873 | |
874 | static const struct net_device_ops vrf_netdev_ops = { |
875 | .ndo_init = vrf_dev_init, |
876 | .ndo_uninit = vrf_dev_uninit, |
877 | .ndo_start_xmit = vrf_xmit, |
878 | .ndo_get_stats64 = vrf_get_stats64, |
879 | .ndo_add_slave = vrf_add_slave, |
880 | .ndo_del_slave = vrf_del_slave, |
881 | }; |
882 | |
883 | static u32 vrf_fib_table(const struct net_device *dev) |
884 | { |
885 | struct net_vrf *vrf = netdev_priv(dev); |
886 | |
887 | return vrf->tb_id; |
888 | } |
889 | |
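/* okfn for vrf_rcv_nfhook(): if the hook queued the packet (NF_QUEUE),
 * the original receive path is gone by the time it is reinjected, so
 * the packet is simply dropped here.
 */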
890 | static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
891 | { |
892 | kfree_skb(skb); |
893 | return 0; |
894 | } |
895 | |
896 | static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook, |
897 | struct sk_buff *skb, |
898 | struct net_device *dev) |
899 | { |
900 | struct net *net = dev_net(dev); |
901 | |
902 | if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1) |
903 | skb = NULL; /* kfree_skb(skb) handled by nf code */ |
904 | |
905 | return skb; |
906 | } |
907 | |
908 | #if IS_ENABLED(CONFIG_IPV6) |
/* Neighbor handling is done with the actual device, so we do not want
 * to flip skb->dev for ndisc packets. This check misses ICMPv6 hidden
 * behind extension headers (e.g., NEXTHDR_HOP), but it is a start.
 */
914 | static bool ipv6_ndisc_frame(const struct sk_buff *skb) |
915 | { |
916 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
917 | bool rc = false; |
918 | |
919 | if (iph->nexthdr == NEXTHDR_ICMP) { |
920 | const struct icmp6hdr *icmph; |
921 | struct icmp6hdr _icmph; |
922 | |
923 | icmph = skb_header_pointer(skb, sizeof(*iph), |
924 | sizeof(_icmph), &_icmph); |
925 | if (!icmph) |
926 | goto out; |
927 | |
928 | switch (icmph->icmp6_type) { |
929 | case NDISC_ROUTER_SOLICITATION: |
930 | case NDISC_ROUTER_ADVERTISEMENT: |
931 | case NDISC_NEIGHBOUR_SOLICITATION: |
932 | case NDISC_NEIGHBOUR_ADVERTISEMENT: |
933 | case NDISC_REDIRECT: |
934 | rc = true; |
935 | break; |
936 | } |
937 | } |
938 | |
939 | out: |
940 | return rc; |
941 | } |
942 | |
943 | static struct rt6_info *vrf_ip6_route_lookup(struct net *net, |
944 | const struct net_device *dev, |
945 | struct flowi6 *fl6, |
946 | int ifindex, |
947 | const struct sk_buff *skb, |
948 | int flags) |
949 | { |
950 | struct net_vrf *vrf = netdev_priv(dev); |
951 | |
952 | return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags); |
953 | } |
954 | |
955 | static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, |
956 | int ifindex) |
957 | { |
958 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
959 | struct flowi6 fl6 = { |
960 | .flowi6_iif = ifindex, |
961 | .flowi6_mark = skb->mark, |
962 | .flowi6_proto = iph->nexthdr, |
963 | .daddr = iph->daddr, |
964 | .saddr = iph->saddr, |
965 | .flowlabel = ip6_flowinfo(iph), |
966 | }; |
967 | struct net *net = dev_net(vrf_dev); |
968 | struct rt6_info *rt6; |
969 | |
970 | rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb, |
971 | RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE); |
972 | if (unlikely(!rt6)) |
973 | return; |
974 | |
975 | if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst)) |
976 | return; |
977 | |
978 | skb_dst_set(skb, &rt6->dst); |
979 | } |
980 | |
981 | static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, |
982 | struct sk_buff *skb) |
983 | { |
984 | int orig_iif = skb->skb_iif; |
985 | bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); |
986 | bool is_ndisc = ipv6_ndisc_frame(skb); |
987 | |
988 | /* loopback, multicast & non-ND link-local traffic; do not push through |
989 | * packet taps again. Reset pkt_type for upper layers to process skb |
990 | */ |
991 | if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { |
992 | skb->dev = vrf_dev; |
993 | skb->skb_iif = vrf_dev->ifindex; |
994 | IP6CB(skb)->flags |= IP6SKB_L3SLAVE; |
995 | if (skb->pkt_type == PACKET_LOOPBACK) |
996 | skb->pkt_type = PACKET_HOST; |
997 | goto out; |
998 | } |
999 | |
1000 | /* if packet is NDISC then keep the ingress interface */ |
1001 | if (!is_ndisc) { |
1002 | vrf_rx_stats(vrf_dev, skb->len); |
1003 | skb->dev = vrf_dev; |
1004 | skb->skb_iif = vrf_dev->ifindex; |
1005 | |
1006 | if (!list_empty(&vrf_dev->ptype_all)) { |
1007 | skb_push(skb, skb->mac_len); |
1008 | dev_queue_xmit_nit(skb, vrf_dev); |
1009 | skb_pull(skb, skb->mac_len); |
1010 | } |
1011 | |
1012 | IP6CB(skb)->flags |= IP6SKB_L3SLAVE; |
1013 | } |
1014 | |
1015 | if (need_strict) |
1016 | vrf_ip6_input_dst(skb, vrf_dev, orig_iif); |
1017 | |
1018 | skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev); |
1019 | out: |
1020 | return skb; |
1021 | } |
1022 | |
1023 | #else |
1024 | static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, |
1025 | struct sk_buff *skb) |
1026 | { |
1027 | return skb; |
1028 | } |
1029 | #endif |
1030 | |
1031 | static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, |
1032 | struct sk_buff *skb) |
1033 | { |
1034 | skb->dev = vrf_dev; |
1035 | skb->skb_iif = vrf_dev->ifindex; |
1036 | IPCB(skb)->flags |= IPSKB_L3SLAVE; |
1037 | |
1038 | if (ipv4_is_multicast(ip_hdr(skb)->daddr)) |
1039 | goto out; |
1040 | |
1041 | /* loopback traffic; do not push through packet taps again. |
1042 | * Reset pkt_type for upper layers to process skb |
1043 | */ |
1044 | if (skb->pkt_type == PACKET_LOOPBACK) { |
1045 | skb->pkt_type = PACKET_HOST; |
1046 | goto out; |
1047 | } |
1048 | |
1049 | vrf_rx_stats(vrf_dev, skb->len); |
1050 | |
1051 | if (!list_empty(&vrf_dev->ptype_all)) { |
1052 | skb_push(skb, skb->mac_len); |
1053 | dev_queue_xmit_nit(skb, vrf_dev); |
1054 | skb_pull(skb, skb->mac_len); |
1055 | } |
1056 | |
1057 | skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev); |
1058 | out: |
1059 | return skb; |
1060 | } |
1061 | |
1062 | /* called with rcu lock held */ |
1063 | static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev, |
1064 | struct sk_buff *skb, |
1065 | u16 proto) |
1066 | { |
1067 | switch (proto) { |
1068 | case AF_INET: |
1069 | return vrf_ip_rcv(vrf_dev, skb); |
1070 | case AF_INET6: |
1071 | return vrf_ip6_rcv(vrf_dev, skb); |
1072 | } |
1073 | |
1074 | return skb; |
1075 | } |
1076 | |
1077 | #if IS_ENABLED(CONFIG_IPV6) |
1078 | /* send to link-local or multicast address via interface enslaved to |
1079 | * VRF device. Force lookup to VRF table without changing flow struct |
1080 | */ |
1081 | static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev, |
1082 | struct flowi6 *fl6) |
1083 | { |
1084 | struct net *net = dev_net(dev); |
1085 | int flags = RT6_LOOKUP_F_IFACE; |
1086 | struct dst_entry *dst = NULL; |
1087 | struct rt6_info *rt; |
1088 | |
1089 | /* VRF device does not have a link-local address and |
1090 | * sending packets to link-local or mcast addresses over |
1091 | * a VRF device does not make sense |
1092 | */ |
1093 | if (fl6->flowi6_oif == dev->ifindex) { |
1094 | dst = &net->ipv6.ip6_null_entry->dst; |
1095 | dst_hold(dst); |
1096 | return dst; |
1097 | } |
1098 | |
1099 | if (!ipv6_addr_any(&fl6->saddr)) |
1100 | flags |= RT6_LOOKUP_F_HAS_SADDR; |
1101 | |
1102 | rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags); |
1103 | if (rt) |
1104 | dst = &rt->dst; |
1105 | |
1106 | return dst; |
1107 | } |
1108 | #endif |
1109 | |
1110 | static const struct l3mdev_ops vrf_l3mdev_ops = { |
1111 | .l3mdev_fib_table = vrf_fib_table, |
1112 | .l3mdev_l3_rcv = vrf_l3_rcv, |
1113 | .l3mdev_l3_out = vrf_l3_out, |
1114 | #if IS_ENABLED(CONFIG_IPV6) |
1115 | .l3mdev_link_scope_lookup = vrf_link_scope_lookup, |
1116 | #endif |
1117 | }; |
1118 | |
1119 | static void vrf_get_drvinfo(struct net_device *dev, |
1120 | struct ethtool_drvinfo *info) |
1121 | { |
1122 | strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); |
1123 | strlcpy(info->version, DRV_VERSION, sizeof(info->version)); |
1124 | } |
1125 | |
1126 | static const struct ethtool_ops vrf_ethtool_ops = { |
1127 | .get_drvinfo = vrf_get_drvinfo, |
1128 | }; |
1129 | |
1130 | static inline size_t vrf_fib_rule_nl_size(void) |
1131 | { |
1132 | size_t sz; |
1133 | |
1134 | sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)); |
1135 | sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */ |
1136 | sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */ |
1137 | sz += nla_total_size(sizeof(u8)); /* FRA_PROTOCOL */ |
1138 | |
1139 | return sz; |
1140 | } |
1141 | |
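/* Build a synthetic netlink fib rule message and feed it straight to
 * fib_nl_{new,del}rule() to install or remove the l3mdev rule for one
 * address family. EEXIST/ENOENT are tolerated so the operation is
 * idempotent.
 */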
1142 | static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) |
1143 | { |
1144 | struct fib_rule_hdr *frh; |
1145 | struct nlmsghdr *nlh; |
1146 | struct sk_buff *skb; |
1147 | int err; |
1148 | |
1149 | if (family == AF_INET6 && !ipv6_mod_enabled()) |
1150 | return 0; |
1151 | |
1152 | skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL); |
1153 | if (!skb) |
1154 | return -ENOMEM; |
1155 | |
1156 | nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0); |
1157 | if (!nlh) |
1158 | goto nla_put_failure; |
1159 | |
1160 | /* rule only needs to appear once */ |
1161 | nlh->nlmsg_flags |= NLM_F_EXCL; |
1162 | |
1163 | frh = nlmsg_data(nlh); |
1164 | memset(frh, 0, sizeof(*frh)); |
1165 | frh->family = family; |
1166 | frh->action = FR_ACT_TO_TBL; |
1167 | |
1168 | if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL)) |
1169 | goto nla_put_failure; |
1170 | |
1171 | if (nla_put_u8(skb, FRA_L3MDEV, 1)) |
1172 | goto nla_put_failure; |
1173 | |
1174 | if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF)) |
1175 | goto nla_put_failure; |
1176 | |
1177 | nlmsg_end(skb, nlh); |
1178 | |
1179 | /* fib_nl_{new,del}rule handling looks for net from skb->sk */ |
1180 | skb->sk = dev_net(dev)->rtnl; |
1181 | if (add_it) { |
1182 | err = fib_nl_newrule(skb, nlh, NULL); |
1183 | if (err == -EEXIST) |
1184 | err = 0; |
1185 | } else { |
1186 | err = fib_nl_delrule(skb, nlh, NULL); |
1187 | if (err == -ENOENT) |
1188 | err = 0; |
1189 | } |
1190 | nlmsg_free(skb); |
1191 | |
1192 | return err; |
1193 | |
1194 | nla_put_failure: |
1195 | nlmsg_free(skb); |
1196 | |
1197 | return -EMSGSIZE; |
1198 | } |
1199 | |
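/* Install the l3mdev FIB rules once per namespace - roughly what
 * "ip rule add l3mdev pref 1000" (plus the IPv6 and multicast
 * equivalents) would do from userspace.
 */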
1200 | static int vrf_add_fib_rules(const struct net_device *dev) |
1201 | { |
1202 | int err; |
1203 | |
1204 | err = vrf_fib_rule(dev, AF_INET, true); |
1205 | if (err < 0) |
1206 | goto out_err; |
1207 | |
1208 | err = vrf_fib_rule(dev, AF_INET6, true); |
1209 | if (err < 0) |
1210 | goto ipv6_err; |
1211 | |
1212 | #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES) |
1213 | err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true); |
1214 | if (err < 0) |
1215 | goto ipmr_err; |
1216 | #endif |
1217 | |
1218 | #if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) |
1219 | err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true); |
1220 | if (err < 0) |
1221 | goto ip6mr_err; |
1222 | #endif |
1223 | |
1224 | return 0; |
1225 | |
1226 | #if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) |
1227 | ip6mr_err: |
1228 | vrf_fib_rule(dev, RTNL_FAMILY_IPMR, false); |
1229 | #endif |
1230 | |
1231 | #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES) |
1232 | ipmr_err: |
1233 | vrf_fib_rule(dev, AF_INET6, false); |
1234 | #endif |
1235 | |
1236 | ipv6_err: |
1237 | vrf_fib_rule(dev, AF_INET, false); |
1238 | |
1239 | out_err: |
	netdev_err(dev, "Failed to add FIB rules.\n");
1241 | return err; |
1242 | } |
1243 | |
1244 | static void vrf_setup(struct net_device *dev) |
1245 | { |
1246 | ether_setup(dev); |
1247 | |
1248 | /* Initialize the device structure. */ |
1249 | dev->netdev_ops = &vrf_netdev_ops; |
1250 | dev->l3mdev_ops = &vrf_l3mdev_ops; |
1251 | dev->ethtool_ops = &vrf_ethtool_ops; |
1252 | dev->needs_free_netdev = true; |
1253 | |
1254 | /* Fill in device structure with ethernet-generic values. */ |
1255 | eth_hw_addr_random(dev); |
1256 | |
1257 | /* don't acquire vrf device's netif_tx_lock when transmitting */ |
1258 | dev->features |= NETIF_F_LLTX; |
1259 | |
1260 | /* don't allow vrf devices to change network namespaces. */ |
1261 | dev->features |= NETIF_F_NETNS_LOCAL; |
1262 | |
1263 | /* does not make sense for a VLAN to be added to a vrf device */ |
1264 | dev->features |= NETIF_F_VLAN_CHALLENGED; |
1265 | |
1266 | /* enable offload features */ |
1267 | dev->features |= NETIF_F_GSO_SOFTWARE; |
1268 | dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; |
1269 | dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; |
1270 | |
1271 | dev->hw_features = dev->features; |
1272 | dev->hw_enc_features = dev->features; |
1273 | |
1274 | /* default to no qdisc; user can add if desired */ |
1275 | dev->priv_flags |= IFF_NO_QUEUE; |
1276 | |
1277 | dev->min_mtu = 0; |
1278 | dev->max_mtu = 0; |
1279 | } |
1280 | |
1281 | static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], |
1282 | struct netlink_ext_ack *extack) |
1283 | { |
1284 | if (tb[IFLA_ADDRESS]) { |
1285 | if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { |
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
1287 | return -EINVAL; |
1288 | } |
1289 | if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { |
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
1291 | return -EADDRNOTAVAIL; |
1292 | } |
1293 | } |
1294 | return 0; |
1295 | } |
1296 | |
1297 | static void vrf_dellink(struct net_device *dev, struct list_head *head) |
1298 | { |
1299 | struct net_device *port_dev; |
1300 | struct list_head *iter; |
1301 | |
1302 | netdev_for_each_lower_dev(dev, port_dev, iter) |
1303 | vrf_del_slave(dev, port_dev); |
1304 | |
1305 | unregister_netdevice_queue(dev, head); |
1306 | } |
1307 | |
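/* newlink handler: IFLA_VRF_TABLE is required (see the usage example at
 * the top of this file). The first VRF created in a namespace also
 * installs the l3mdev FIB rules.
 */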
1308 | static int vrf_newlink(struct net *src_net, struct net_device *dev, |
1309 | struct nlattr *tb[], struct nlattr *data[], |
1310 | struct netlink_ext_ack *extack) |
1311 | { |
1312 | struct net_vrf *vrf = netdev_priv(dev); |
1313 | bool *add_fib_rules; |
1314 | struct net *net; |
1315 | int err; |
1316 | |
1317 | if (!data || !data[IFLA_VRF_TABLE]) { |
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
1319 | return -EINVAL; |
1320 | } |
1321 | |
1322 | vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); |
1323 | if (vrf->tb_id == RT_TABLE_UNSPEC) { |
1324 | NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE], |
1325 | "Invalid VRF table id" ); |
1326 | return -EINVAL; |
1327 | } |
1328 | |
1329 | dev->priv_flags |= IFF_L3MDEV_MASTER; |
1330 | |
1331 | err = register_netdevice(dev); |
1332 | if (err) |
1333 | goto out; |
1334 | |
1335 | net = dev_net(dev); |
1336 | add_fib_rules = net_generic(net, vrf_net_id); |
1337 | if (*add_fib_rules) { |
1338 | err = vrf_add_fib_rules(dev); |
1339 | if (err) { |
1340 | unregister_netdevice(dev); |
1341 | goto out; |
1342 | } |
1343 | *add_fib_rules = false; |
1344 | } |
1345 | |
1346 | out: |
1347 | return err; |
1348 | } |
1349 | |
1350 | static size_t vrf_nl_getsize(const struct net_device *dev) |
1351 | { |
1352 | return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */ |
1353 | } |
1354 | |
1355 | static int vrf_fillinfo(struct sk_buff *skb, |
1356 | const struct net_device *dev) |
1357 | { |
1358 | struct net_vrf *vrf = netdev_priv(dev); |
1359 | |
1360 | return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id); |
1361 | } |
1362 | |
1363 | static size_t vrf_get_slave_size(const struct net_device *bond_dev, |
1364 | const struct net_device *slave_dev) |
1365 | { |
1366 | return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */ |
1367 | } |
1368 | |
1369 | static int vrf_fill_slave_info(struct sk_buff *skb, |
1370 | const struct net_device *vrf_dev, |
1371 | const struct net_device *slave_dev) |
1372 | { |
1373 | struct net_vrf *vrf = netdev_priv(vrf_dev); |
1374 | |
1375 | if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id)) |
1376 | return -EMSGSIZE; |
1377 | |
1378 | return 0; |
1379 | } |
1380 | |
1381 | static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { |
1382 | [IFLA_VRF_TABLE] = { .type = NLA_U32 }, |
1383 | }; |
1384 | |
1385 | static struct rtnl_link_ops vrf_link_ops __read_mostly = { |
1386 | .kind = DRV_NAME, |
1387 | .priv_size = sizeof(struct net_vrf), |
1388 | |
1389 | .get_size = vrf_nl_getsize, |
1390 | .policy = vrf_nl_policy, |
1391 | .validate = vrf_validate, |
1392 | .fill_info = vrf_fillinfo, |
1393 | |
1394 | .get_slave_size = vrf_get_slave_size, |
1395 | .fill_slave_info = vrf_fill_slave_info, |
1396 | |
1397 | .newlink = vrf_newlink, |
1398 | .dellink = vrf_dellink, |
1399 | .setup = vrf_setup, |
1400 | .maxtype = IFLA_VRF_MAX, |
1401 | }; |
1402 | |
1403 | static int vrf_device_event(struct notifier_block *unused, |
1404 | unsigned long event, void *ptr) |
1405 | { |
1406 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
1407 | |
1408 | /* only care about unregister events to drop slave references */ |
1409 | if (event == NETDEV_UNREGISTER) { |
1410 | struct net_device *vrf_dev; |
1411 | |
1412 | if (!netif_is_l3_slave(dev)) |
1413 | goto out; |
1414 | |
1415 | vrf_dev = netdev_master_upper_dev_get(dev); |
1416 | vrf_del_slave(vrf_dev, dev); |
1417 | } |
1418 | out: |
1419 | return NOTIFY_DONE; |
1420 | } |
1421 | |
1422 | static struct notifier_block vrf_notifier_block __read_mostly = { |
1423 | .notifier_call = vrf_device_event, |
1424 | }; |
1425 | |
1426 | /* Initialize per network namespace state */ |
1427 | static int __net_init vrf_netns_init(struct net *net) |
1428 | { |
1429 | bool *add_fib_rules = net_generic(net, vrf_net_id); |
1430 | |
1431 | *add_fib_rules = true; |
1432 | |
1433 | return 0; |
1434 | } |
1435 | |
1436 | static struct pernet_operations vrf_net_ops __net_initdata = { |
1437 | .init = vrf_netns_init, |
1438 | .id = &vrf_net_id, |
1439 | .size = sizeof(bool), |
1440 | }; |
1441 | |
1442 | static int __init vrf_init_module(void) |
1443 | { |
1444 | int rc; |
1445 | |
1446 | register_netdevice_notifier(&vrf_notifier_block); |
1447 | |
1448 | rc = register_pernet_subsys(&vrf_net_ops); |
1449 | if (rc < 0) |
1450 | goto error; |
1451 | |
1452 | rc = rtnl_link_register(&vrf_link_ops); |
1453 | if (rc < 0) { |
1454 | unregister_pernet_subsys(&vrf_net_ops); |
1455 | goto error; |
1456 | } |
1457 | |
1458 | return 0; |
1459 | |
1460 | error: |
1461 | unregister_netdevice_notifier(&vrf_notifier_block); |
1462 | return rc; |
1463 | } |
1464 | |
1465 | module_init(vrf_init_module); |
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
1469 | MODULE_ALIAS_RTNL_LINK(DRV_NAME); |
1470 | MODULE_VERSION(DRV_VERSION); |
1471 | |