1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
3 | * operating system. INET is implemented using the BSD Socket |
4 | * interface as the means of communication with the user level. |
5 | * |
6 | * IPv4 Forwarding Information Base: FIB frontend. |
7 | * |
8 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
9 | * |
10 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version |
13 | * 2 of the License, or (at your option) any later version. |
14 | */ |
15 | |
16 | #include <linux/module.h> |
17 | #include <linux/uaccess.h> |
18 | #include <linux/bitops.h> |
19 | #include <linux/capability.h> |
20 | #include <linux/types.h> |
21 | #include <linux/kernel.h> |
22 | #include <linux/mm.h> |
23 | #include <linux/string.h> |
24 | #include <linux/socket.h> |
25 | #include <linux/sockios.h> |
26 | #include <linux/errno.h> |
27 | #include <linux/in.h> |
28 | #include <linux/inet.h> |
29 | #include <linux/inetdevice.h> |
30 | #include <linux/netdevice.h> |
31 | #include <linux/if_addr.h> |
32 | #include <linux/if_arp.h> |
33 | #include <linux/skbuff.h> |
34 | #include <linux/cache.h> |
35 | #include <linux/init.h> |
36 | #include <linux/list.h> |
37 | #include <linux/slab.h> |
38 | |
39 | #include <net/ip.h> |
40 | #include <net/protocol.h> |
41 | #include <net/route.h> |
42 | #include <net/tcp.h> |
43 | #include <net/sock.h> |
44 | #include <net/arp.h> |
45 | #include <net/ip_fib.h> |
46 | #include <net/rtnetlink.h> |
47 | #include <net/xfrm.h> |
48 | #include <net/l3mdev.h> |
49 | #include <net/lwtunnel.h> |
50 | #include <trace/events/fib.h> |
51 | |
52 | #ifndef CONFIG_IP_MULTIPLE_TABLES |
53 | |
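/* Without CONFIG_IP_MULTIPLE_TABLES only the main and local tables exist.
 * The local table is created with the main table as its alias so that a
 * single trie backs both of them.
 */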
54 | static int __net_init fib4_rules_init(struct net *net) |
55 | { |
56 | struct fib_table *local_table, *main_table; |
57 | |
58 | main_table = fib_trie_table(RT_TABLE_MAIN, NULL); |
59 | if (!main_table) |
60 | return -ENOMEM; |
61 | |
62 | local_table = fib_trie_table(RT_TABLE_LOCAL, main_table); |
63 | if (!local_table) |
64 | goto fail; |
65 | |
66 | hlist_add_head_rcu(&local_table->tb_hlist, |
67 | &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]); |
68 | hlist_add_head_rcu(&main_table->tb_hlist, |
69 | &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]); |
70 | return 0; |
71 | |
72 | fail: |
73 | fib_free_table(main_table); |
74 | return -ENOMEM; |
75 | } |
76 | |
77 | static bool fib4_has_custom_rules(struct net *net) |
78 | { |
79 | return false; |
80 | } |
81 | #else |
82 | |
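/* Return the table with the given id, creating it if it does not exist yet.
 * As long as no custom rules are installed, the local table is created as
 * an alias of the main table so a single trie backs both.
 */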
83 | struct fib_table *fib_new_table(struct net *net, u32 id) |
84 | { |
85 | struct fib_table *tb, *alias = NULL; |
86 | unsigned int h; |
87 | |
88 | if (id == 0) |
89 | id = RT_TABLE_MAIN; |
90 | tb = fib_get_table(net, id); |
91 | if (tb) |
92 | return tb; |
93 | |
94 | if (id == RT_TABLE_LOCAL && !net->ipv4.fib_has_custom_rules) |
95 | alias = fib_new_table(net, RT_TABLE_MAIN); |
96 | |
97 | tb = fib_trie_table(id, alias); |
98 | if (!tb) |
99 | return NULL; |
100 | |
101 | switch (id) { |
102 | case RT_TABLE_MAIN: |
103 | rcu_assign_pointer(net->ipv4.fib_main, tb); |
104 | break; |
105 | case RT_TABLE_DEFAULT: |
106 | rcu_assign_pointer(net->ipv4.fib_default, tb); |
107 | break; |
108 | default: |
109 | break; |
110 | } |
111 | |
112 | h = id & (FIB_TABLE_HASHSZ - 1); |
113 | hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]); |
114 | return tb; |
115 | } |
116 | EXPORT_SYMBOL_GPL(fib_new_table); |
117 | |
118 | /* caller must hold either rtnl or rcu read lock */ |
119 | struct fib_table *fib_get_table(struct net *net, u32 id) |
120 | { |
121 | struct fib_table *tb; |
122 | struct hlist_head *head; |
123 | unsigned int h; |
124 | |
125 | if (id == 0) |
126 | id = RT_TABLE_MAIN; |
127 | h = id & (FIB_TABLE_HASHSZ - 1); |
128 | |
129 | head = &net->ipv4.fib_table_hash[h]; |
130 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
131 | if (tb->tb_id == id) |
132 | return tb; |
133 | } |
134 | return NULL; |
135 | } |
136 | |
137 | static bool fib4_has_custom_rules(struct net *net) |
138 | { |
139 | return net->ipv4.fib_has_custom_rules; |
140 | } |
141 | #endif /* CONFIG_IP_MULTIPLE_TABLES */ |
142 | |
143 | static void fib_replace_table(struct net *net, struct fib_table *old, |
144 | struct fib_table *new) |
145 | { |
146 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
147 | switch (new->tb_id) { |
148 | case RT_TABLE_MAIN: |
149 | rcu_assign_pointer(net->ipv4.fib_main, new); |
150 | break; |
151 | case RT_TABLE_DEFAULT: |
152 | rcu_assign_pointer(net->ipv4.fib_default, new); |
153 | break; |
154 | default: |
155 | break; |
156 | } |
157 | |
158 | #endif |
159 | /* replace the old table in the hlist */ |
160 | hlist_replace_rcu(&old->tb_hlist, &new->tb_hlist); |
161 | } |
162 | |
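/* Undo the local/main table aliasing: replace the merged local table with a
 * standalone copy, then flush the local entries that were merged into the
 * main table.
 */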
163 | int fib_unmerge(struct net *net) |
164 | { |
165 | struct fib_table *old, *new, *main_table; |
166 | |
167 | /* attempt to fetch local table if it has been allocated */ |
168 | old = fib_get_table(net, RT_TABLE_LOCAL); |
169 | if (!old) |
170 | return 0; |
171 | |
172 | new = fib_trie_unmerge(old); |
173 | if (!new) |
174 | return -ENOMEM; |
175 | |
176 | /* table is already unmerged */ |
177 | if (new == old) |
178 | return 0; |
179 | |
180 | /* replace merged table with clean table */ |
181 | fib_replace_table(net, old, new); |
182 | fib_free_table(old); |
183 | |
184 | /* attempt to fetch main table if it has been allocated */ |
185 | main_table = fib_get_table(net, RT_TABLE_MAIN); |
186 | if (!main_table) |
187 | return 0; |
188 | |
189 | /* flush local entries from main table */ |
190 | fib_table_flush_external(main_table); |
191 | |
192 | return 0; |
193 | } |
194 | |
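/* Flush every FIB table in the namespace and, if anything was removed,
 * flush the routing cache as well.
 */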
195 | static void fib_flush(struct net *net) |
196 | { |
197 | int flushed = 0; |
198 | unsigned int h; |
199 | |
200 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
201 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
202 | struct hlist_node *tmp; |
203 | struct fib_table *tb; |
204 | |
205 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) |
206 | flushed += fib_table_flush(net, tb, false); |
207 | } |
208 | |
209 | if (flushed) |
210 | rt_cache_flush(net); |
211 | } |
212 | |
213 | /* |
214 | * Find address type as if only "dev" was present in the system. If |
215 | * on_dev is NULL then all interfaces are taken into consideration. |
216 | */ |
217 | static inline unsigned int __inet_dev_addr_type(struct net *net, |
218 | const struct net_device *dev, |
219 | __be32 addr, u32 tb_id) |
220 | { |
221 | struct flowi4 fl4 = { .daddr = addr }; |
222 | struct fib_result res; |
223 | unsigned int ret = RTN_BROADCAST; |
224 | struct fib_table *table; |
225 | |
226 | if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr)) |
227 | return RTN_BROADCAST; |
228 | if (ipv4_is_multicast(addr)) |
229 | return RTN_MULTICAST; |
230 | |
231 | rcu_read_lock(); |
232 | |
233 | table = fib_get_table(net, tb_id); |
234 | if (table) { |
235 | ret = RTN_UNICAST; |
236 | if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) { |
237 | if (!dev || dev == res.fi->fib_dev) |
238 | ret = res.type; |
239 | } |
240 | } |
241 | |
242 | rcu_read_unlock(); |
243 | return ret; |
244 | } |
245 | |
246 | unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id) |
247 | { |
248 | return __inet_dev_addr_type(net, NULL, addr, tb_id); |
249 | } |
250 | EXPORT_SYMBOL(inet_addr_type_table); |
251 | |
252 | unsigned int inet_addr_type(struct net *net, __be32 addr) |
253 | { |
254 | return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL); |
255 | } |
256 | EXPORT_SYMBOL(inet_addr_type); |
257 | |
258 | unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, |
259 | __be32 addr) |
260 | { |
261 | u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL; |
262 | |
263 | return __inet_dev_addr_type(net, dev, addr, rt_table); |
264 | } |
265 | EXPORT_SYMBOL(inet_dev_addr_type); |
266 | |
267 | /* inet_addr_type with dev == NULL but using the table from a dev |
268 | * if one is associated |
269 | */ |
270 | unsigned int inet_addr_type_dev_table(struct net *net, |
271 | const struct net_device *dev, |
272 | __be32 addr) |
273 | { |
274 | u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL; |
275 | |
276 | return __inet_dev_addr_type(net, NULL, addr, rt_table); |
277 | } |
278 | EXPORT_SYMBOL(inet_addr_type_dev_table); |
279 | |
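/* Compute the "specific destination" address for a received skb: the
 * destination itself for locally delivered packets, otherwise the preferred
 * source of the route back to the sender, falling back to inet_select_addr().
 */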
280 | __be32 fib_compute_spec_dst(struct sk_buff *skb) |
281 | { |
282 | struct net_device *dev = skb->dev; |
283 | struct in_device *in_dev; |
284 | struct fib_result res; |
285 | struct rtable *rt; |
286 | struct net *net; |
287 | int scope; |
288 | |
289 | rt = skb_rtable(skb); |
290 | if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) == |
291 | RTCF_LOCAL) |
292 | return ip_hdr(skb)->daddr; |
293 | |
294 | in_dev = __in_dev_get_rcu(dev); |
295 | |
296 | net = dev_net(dev); |
297 | |
298 | scope = RT_SCOPE_UNIVERSE; |
299 | if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { |
300 | bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev); |
301 | struct flowi4 fl4 = { |
302 | .flowi4_iif = LOOPBACK_IFINDEX, |
303 | .flowi4_oif = l3mdev_master_ifindex_rcu(dev), |
304 | .daddr = ip_hdr(skb)->saddr, |
305 | .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), |
306 | .flowi4_scope = scope, |
307 | .flowi4_mark = vmark ? skb->mark : 0, |
308 | }; |
309 | if (!fib_lookup(net, &fl4, &res, 0)) |
310 | return FIB_RES_PREFSRC(net, res); |
311 | } else { |
312 | scope = RT_SCOPE_LINK; |
313 | } |
314 | |
315 | return inet_select_addr(dev, ip_hdr(skb)->saddr, scope); |
316 | } |
317 | |
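/* Return true if any nexthop of @fi uses @dev, either directly or via an
 * L3 master device (e.g. VRF).
 */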
318 | bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev) |
319 | { |
320 | bool dev_match = false; |
321 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
322 | int ret; |
323 | |
324 | for (ret = 0; ret < fi->fib_nhs; ret++) { |
325 | struct fib_nh *nh = &fi->fib_nh[ret]; |
326 | |
327 | if (nh->nh_dev == dev) { |
328 | dev_match = true; |
329 | break; |
330 | } else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) { |
331 | dev_match = true; |
332 | break; |
333 | } |
334 | } |
335 | #else |
336 | if (fi->fib_nh[0].nh_dev == dev) |
337 | dev_match = true; |
338 | #endif |
339 | |
340 | return dev_match; |
341 | } |
342 | EXPORT_SYMBOL_GPL(fib_info_nh_uses_dev); |
343 | |
/* Given (packet source, input interface) and optional (dst, oif, tos):
 * - (main) check that the source is valid, i.e. not a broadcast or one of
 *   our local addresses.
 * - figure out what "logical" interface this packet arrived on
 *   and calculate the "specific destination" address.
 * - check that the packet arrived from the expected physical interface.
 * Called with rcu_read_lock().
 */
352 | static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, |
353 | u8 tos, int oif, struct net_device *dev, |
354 | int rpf, struct in_device *idev, u32 *itag) |
355 | { |
356 | struct net *net = dev_net(dev); |
357 | struct flow_keys flkeys; |
358 | int ret, no_addr; |
359 | struct fib_result res; |
360 | struct flowi4 fl4; |
361 | bool dev_match; |
362 | |
363 | fl4.flowi4_oif = 0; |
364 | fl4.flowi4_iif = l3mdev_master_ifindex_rcu(dev); |
365 | if (!fl4.flowi4_iif) |
366 | fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX; |
367 | fl4.daddr = src; |
368 | fl4.saddr = dst; |
369 | fl4.flowi4_tos = tos; |
370 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; |
371 | fl4.flowi4_tun_key.tun_id = 0; |
372 | fl4.flowi4_flags = 0; |
373 | fl4.flowi4_uid = sock_net_uid(net, NULL); |
374 | |
375 | no_addr = idev->ifa_list == NULL; |
376 | |
377 | fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0; |
378 | if (!fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys)) { |
379 | fl4.flowi4_proto = 0; |
380 | fl4.fl4_sport = 0; |
381 | fl4.fl4_dport = 0; |
382 | } |
383 | |
384 | if (fib_lookup(net, &fl4, &res, 0)) |
385 | goto last_resort; |
386 | if (res.type != RTN_UNICAST && |
387 | (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev))) |
388 | goto e_inval; |
389 | fib_combine_itag(itag, &res); |
390 | |
391 | dev_match = fib_info_nh_uses_dev(res.fi, dev); |
392 | if (dev_match) { |
393 | ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; |
394 | return ret; |
395 | } |
396 | if (no_addr) |
397 | goto last_resort; |
398 | if (rpf == 1) |
399 | goto e_rpf; |
400 | fl4.flowi4_oif = dev->ifindex; |
401 | |
402 | ret = 0; |
403 | if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) { |
404 | if (res.type == RTN_UNICAST) |
405 | ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; |
406 | } |
407 | return ret; |
408 | |
409 | last_resort: |
410 | if (rpf) |
411 | goto e_rpf; |
412 | *itag = 0; |
413 | return 0; |
414 | |
415 | e_inval: |
416 | return -EINVAL; |
417 | e_rpf: |
418 | return -EXDEV; |
419 | } |
420 | |
421 | /* Ignore rp_filter for packets protected by IPsec. */ |
422 | int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, |
423 | u8 tos, int oif, struct net_device *dev, |
424 | struct in_device *idev, u32 *itag) |
425 | { |
426 | int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev); |
427 | struct net *net = dev_net(dev); |
428 | |
429 | if (!r && !fib_num_tclassid_users(net) && |
430 | (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) { |
431 | if (IN_DEV_ACCEPT_LOCAL(idev)) |
432 | goto ok; |
		/* With custom local routes in place, checking local addresses
		 * only would be too optimistic; with custom rules, checking
		 * local addresses only can be too strict, e.g. due to VRF.
		 */
437 | if (net->ipv4.fib_has_custom_local_routes || |
438 | fib4_has_custom_rules(net)) |
439 | goto full_check; |
440 | if (inet_lookup_ifaddr_rcu(net, src)) |
441 | return -EINVAL; |
442 | |
443 | ok: |
444 | *itag = 0; |
445 | return 0; |
446 | } |
447 | |
448 | full_check: |
449 | return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag); |
450 | } |
451 | |
static inline __be32 sk_extract_addr(struct sockaddr *addr)
453 | { |
454 | return ((struct sockaddr_in *) addr)->sin_addr.s_addr; |
455 | } |
456 | |
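/* Append a 32-bit RTAX_* metric to the buffer at offset @len and return the
 * new length.
 */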
457 | static int put_rtax(struct nlattr *mx, int len, int type, u32 value) |
458 | { |
459 | struct nlattr *nla; |
460 | |
461 | nla = (struct nlattr *) ((char *) mx + len); |
462 | nla->nla_type = type; |
463 | nla->nla_len = nla_attr_size(4); |
464 | *(u32 *) nla_data(nla) = value; |
465 | |
466 | return len + nla_total_size(4); |
467 | } |
468 | |
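/* Convert a legacy struct rtentry, as passed in by the SIOCADDRT/SIOCDELRT
 * ioctls, into a struct fib_config understood by the FIB code.
 */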
469 | static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, |
470 | struct fib_config *cfg) |
471 | { |
472 | __be32 addr; |
473 | int plen; |
474 | |
475 | memset(cfg, 0, sizeof(*cfg)); |
476 | cfg->fc_nlinfo.nl_net = net; |
477 | |
478 | if (rt->rt_dst.sa_family != AF_INET) |
479 | return -EAFNOSUPPORT; |
480 | |
481 | /* |
482 | * Check mask for validity: |
483 | * a) it must be contiguous. |
484 | * b) destination must have all host bits clear. |
485 | * c) if application forgot to set correct family (AF_INET), |
486 | * reject request unless it is absolutely clear i.e. |
487 | * both family and mask are zero. |
488 | */ |
489 | plen = 32; |
490 | addr = sk_extract_addr(&rt->rt_dst); |
491 | if (!(rt->rt_flags & RTF_HOST)) { |
492 | __be32 mask = sk_extract_addr(&rt->rt_genmask); |
493 | |
494 | if (rt->rt_genmask.sa_family != AF_INET) { |
495 | if (mask || rt->rt_genmask.sa_family) |
496 | return -EAFNOSUPPORT; |
497 | } |
498 | |
499 | if (bad_mask(mask, addr)) |
500 | return -EINVAL; |
501 | |
502 | plen = inet_mask_len(mask); |
503 | } |
504 | |
505 | cfg->fc_dst_len = plen; |
506 | cfg->fc_dst = addr; |
507 | |
508 | if (cmd != SIOCDELRT) { |
509 | cfg->fc_nlflags = NLM_F_CREATE; |
510 | cfg->fc_protocol = RTPROT_BOOT; |
511 | } |
512 | |
513 | if (rt->rt_metric) |
514 | cfg->fc_priority = rt->rt_metric - 1; |
515 | |
516 | if (rt->rt_flags & RTF_REJECT) { |
517 | cfg->fc_scope = RT_SCOPE_HOST; |
518 | cfg->fc_type = RTN_UNREACHABLE; |
519 | return 0; |
520 | } |
521 | |
522 | cfg->fc_scope = RT_SCOPE_NOWHERE; |
523 | cfg->fc_type = RTN_UNICAST; |
524 | |
525 | if (rt->rt_dev) { |
526 | char *colon; |
527 | struct net_device *dev; |
528 | char devname[IFNAMSIZ]; |
529 | |
530 | if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1)) |
531 | return -EFAULT; |
532 | |
533 | devname[IFNAMSIZ-1] = 0; |
534 | colon = strchr(devname, ':'); |
535 | if (colon) |
536 | *colon = 0; |
537 | dev = __dev_get_by_name(net, devname); |
538 | if (!dev) |
539 | return -ENODEV; |
540 | cfg->fc_oif = dev->ifindex; |
541 | cfg->fc_table = l3mdev_fib_table(dev); |
542 | if (colon) { |
543 | struct in_ifaddr *ifa; |
544 | struct in_device *in_dev = __in_dev_get_rtnl(dev); |
545 | if (!in_dev) |
546 | return -ENODEV; |
547 | *colon = ':'; |
548 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) |
549 | if (strcmp(ifa->ifa_label, devname) == 0) |
550 | break; |
551 | if (!ifa) |
552 | return -ENODEV; |
553 | cfg->fc_prefsrc = ifa->ifa_local; |
554 | } |
555 | } |
556 | |
557 | addr = sk_extract_addr(&rt->rt_gateway); |
558 | if (rt->rt_gateway.sa_family == AF_INET && addr) { |
559 | unsigned int addr_type; |
560 | |
561 | cfg->fc_gw = addr; |
562 | addr_type = inet_addr_type_table(net, addr, cfg->fc_table); |
563 | if (rt->rt_flags & RTF_GATEWAY && |
564 | addr_type == RTN_UNICAST) |
565 | cfg->fc_scope = RT_SCOPE_UNIVERSE; |
566 | } |
567 | |
568 | if (cmd == SIOCDELRT) |
569 | return 0; |
570 | |
571 | if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw) |
572 | return -EINVAL; |
573 | |
574 | if (cfg->fc_scope == RT_SCOPE_NOWHERE) |
575 | cfg->fc_scope = RT_SCOPE_LINK; |
576 | |
577 | if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) { |
578 | struct nlattr *mx; |
579 | int len = 0; |
580 | |
581 | mx = kcalloc(3, nla_total_size(4), GFP_KERNEL); |
582 | if (!mx) |
583 | return -ENOMEM; |
584 | |
585 | if (rt->rt_flags & RTF_MTU) |
586 | len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40); |
587 | |
588 | if (rt->rt_flags & RTF_WINDOW) |
589 | len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window); |
590 | |
591 | if (rt->rt_flags & RTF_IRTT) |
592 | len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3); |
593 | |
594 | cfg->fc_mx = mx; |
595 | cfg->fc_mx_len = len; |
596 | } |
597 | |
598 | return 0; |
599 | } |
600 | |
601 | /* |
602 | * Handle IP routing ioctl calls. |
603 | * These are used to manipulate the routing tables |
604 | */ |
605 | int ip_rt_ioctl(struct net *net, unsigned int cmd, struct rtentry *rt) |
606 | { |
607 | struct fib_config cfg; |
608 | int err; |
609 | |
610 | switch (cmd) { |
611 | case SIOCADDRT: /* Add a route */ |
612 | case SIOCDELRT: /* Delete a route */ |
613 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
614 | return -EPERM; |
615 | |
616 | rtnl_lock(); |
617 | err = rtentry_to_fib_config(net, cmd, rt, &cfg); |
618 | if (err == 0) { |
619 | struct fib_table *tb; |
620 | |
621 | if (cmd == SIOCDELRT) { |
622 | tb = fib_get_table(net, cfg.fc_table); |
623 | if (tb) |
624 | err = fib_table_delete(net, tb, &cfg, |
625 | NULL); |
626 | else |
627 | err = -ESRCH; |
628 | } else { |
629 | tb = fib_new_table(net, cfg.fc_table); |
630 | if (tb) |
631 | err = fib_table_insert(net, tb, |
632 | &cfg, NULL); |
633 | else |
634 | err = -ENOBUFS; |
635 | } |
636 | |
637 | /* allocated by rtentry_to_fib_config() */ |
638 | kfree(cfg.fc_mx); |
639 | } |
640 | rtnl_unlock(); |
641 | return err; |
642 | } |
643 | return -EINVAL; |
644 | } |
645 | |
646 | const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = { |
647 | [RTA_DST] = { .type = NLA_U32 }, |
648 | [RTA_SRC] = { .type = NLA_U32 }, |
649 | [RTA_IIF] = { .type = NLA_U32 }, |
650 | [RTA_OIF] = { .type = NLA_U32 }, |
651 | [RTA_GATEWAY] = { .type = NLA_U32 }, |
652 | [RTA_PRIORITY] = { .type = NLA_U32 }, |
653 | [RTA_PREFSRC] = { .type = NLA_U32 }, |
654 | [RTA_METRICS] = { .type = NLA_NESTED }, |
655 | [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, |
656 | [RTA_FLOW] = { .type = NLA_U32 }, |
657 | [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, |
658 | [RTA_ENCAP] = { .type = NLA_NESTED }, |
659 | [RTA_UID] = { .type = NLA_U32 }, |
660 | [RTA_MARK] = { .type = NLA_U32 }, |
661 | [RTA_TABLE] = { .type = NLA_U32 }, |
662 | [RTA_IP_PROTO] = { .type = NLA_U8 }, |
663 | [RTA_SPORT] = { .type = NLA_U16 }, |
664 | [RTA_DPORT] = { .type = NLA_U16 }, |
665 | }; |
666 | |
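/* Parse an RTM_NEWROUTE/RTM_DELROUTE netlink message into a struct
 * fib_config.
 */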
667 | static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, |
668 | struct nlmsghdr *nlh, struct fib_config *cfg, |
669 | struct netlink_ext_ack *extack) |
670 | { |
671 | struct nlattr *attr; |
672 | int err, remaining; |
673 | struct rtmsg *rtm; |
674 | |
675 | err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy, |
676 | extack); |
677 | if (err < 0) |
678 | goto errout; |
679 | |
680 | memset(cfg, 0, sizeof(*cfg)); |
681 | |
682 | rtm = nlmsg_data(nlh); |
683 | cfg->fc_dst_len = rtm->rtm_dst_len; |
684 | cfg->fc_tos = rtm->rtm_tos; |
685 | cfg->fc_table = rtm->rtm_table; |
686 | cfg->fc_protocol = rtm->rtm_protocol; |
687 | cfg->fc_scope = rtm->rtm_scope; |
688 | cfg->fc_type = rtm->rtm_type; |
689 | cfg->fc_flags = rtm->rtm_flags; |
690 | cfg->fc_nlflags = nlh->nlmsg_flags; |
691 | |
692 | cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid; |
693 | cfg->fc_nlinfo.nlh = nlh; |
694 | cfg->fc_nlinfo.nl_net = net; |
695 | |
696 | if (cfg->fc_type > RTN_MAX) { |
697 | NL_SET_ERR_MSG(extack, "Invalid route type" ); |
698 | err = -EINVAL; |
699 | goto errout; |
700 | } |
701 | |
702 | nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) { |
703 | switch (nla_type(attr)) { |
704 | case RTA_DST: |
705 | cfg->fc_dst = nla_get_be32(attr); |
706 | break; |
707 | case RTA_OIF: |
708 | cfg->fc_oif = nla_get_u32(attr); |
709 | break; |
710 | case RTA_GATEWAY: |
711 | cfg->fc_gw = nla_get_be32(attr); |
712 | break; |
713 | case RTA_VIA: |
714 | NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute" ); |
715 | err = -EINVAL; |
716 | goto errout; |
717 | case RTA_PRIORITY: |
718 | cfg->fc_priority = nla_get_u32(attr); |
719 | break; |
720 | case RTA_PREFSRC: |
721 | cfg->fc_prefsrc = nla_get_be32(attr); |
722 | break; |
723 | case RTA_METRICS: |
724 | cfg->fc_mx = nla_data(attr); |
725 | cfg->fc_mx_len = nla_len(attr); |
726 | break; |
727 | case RTA_MULTIPATH: |
728 | err = lwtunnel_valid_encap_type_attr(nla_data(attr), |
729 | nla_len(attr), |
730 | extack); |
731 | if (err < 0) |
732 | goto errout; |
733 | cfg->fc_mp = nla_data(attr); |
734 | cfg->fc_mp_len = nla_len(attr); |
735 | break; |
736 | case RTA_FLOW: |
737 | cfg->fc_flow = nla_get_u32(attr); |
738 | break; |
739 | case RTA_TABLE: |
740 | cfg->fc_table = nla_get_u32(attr); |
741 | break; |
742 | case RTA_ENCAP: |
743 | cfg->fc_encap = attr; |
744 | break; |
745 | case RTA_ENCAP_TYPE: |
746 | cfg->fc_encap_type = nla_get_u16(attr); |
747 | err = lwtunnel_valid_encap_type(cfg->fc_encap_type, |
748 | extack); |
749 | if (err < 0) |
750 | goto errout; |
751 | break; |
752 | } |
753 | } |
754 | |
755 | return 0; |
756 | errout: |
757 | return err; |
758 | } |
759 | |
760 | static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, |
761 | struct netlink_ext_ack *extack) |
762 | { |
763 | struct net *net = sock_net(skb->sk); |
764 | struct fib_config cfg; |
765 | struct fib_table *tb; |
766 | int err; |
767 | |
768 | err = rtm_to_fib_config(net, skb, nlh, &cfg, extack); |
769 | if (err < 0) |
770 | goto errout; |
771 | |
772 | tb = fib_get_table(net, cfg.fc_table); |
773 | if (!tb) { |
774 | NL_SET_ERR_MSG(extack, "FIB table does not exist" ); |
775 | err = -ESRCH; |
776 | goto errout; |
777 | } |
778 | |
779 | err = fib_table_delete(net, tb, &cfg, extack); |
780 | errout: |
781 | return err; |
782 | } |
783 | |
784 | static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, |
785 | struct netlink_ext_ack *extack) |
786 | { |
787 | struct net *net = sock_net(skb->sk); |
788 | struct fib_config cfg; |
789 | struct fib_table *tb; |
790 | int err; |
791 | |
792 | err = rtm_to_fib_config(net, skb, nlh, &cfg, extack); |
793 | if (err < 0) |
794 | goto errout; |
795 | |
796 | tb = fib_new_table(net, cfg.fc_table); |
797 | if (!tb) { |
798 | err = -ENOBUFS; |
799 | goto errout; |
800 | } |
801 | |
802 | err = fib_table_insert(net, tb, &cfg, extack); |
803 | if (!err && cfg.fc_type == RTN_LOCAL) |
804 | net->ipv4.fib_has_custom_local_routes = true; |
805 | errout: |
806 | return err; |
807 | } |
808 | |
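/* Validate the header and attributes of a strict RTM_GETROUTE dump request
 * and fill in the dump filter accordingly.
 */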
809 | int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, |
810 | struct fib_dump_filter *filter, |
811 | struct netlink_callback *cb) |
812 | { |
813 | struct netlink_ext_ack *extack = cb->extack; |
814 | struct nlattr *tb[RTA_MAX + 1]; |
815 | struct rtmsg *rtm; |
816 | int err, i; |
817 | |
818 | ASSERT_RTNL(); |
819 | |
820 | if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) { |
821 | NL_SET_ERR_MSG(extack, "Invalid header for FIB dump request" ); |
822 | return -EINVAL; |
823 | } |
824 | |
825 | rtm = nlmsg_data(nlh); |
826 | if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos || |
827 | rtm->rtm_scope) { |
828 | NL_SET_ERR_MSG(extack, "Invalid values in header for FIB dump request" ); |
829 | return -EINVAL; |
830 | } |
831 | if (rtm->rtm_flags & ~(RTM_F_CLONED | RTM_F_PREFIX)) { |
832 | NL_SET_ERR_MSG(extack, "Invalid flags for FIB dump request" ); |
833 | return -EINVAL; |
834 | } |
835 | |
836 | filter->dump_all_families = (rtm->rtm_family == AF_UNSPEC); |
837 | filter->flags = rtm->rtm_flags; |
838 | filter->protocol = rtm->rtm_protocol; |
839 | filter->rt_type = rtm->rtm_type; |
840 | filter->table_id = rtm->rtm_table; |
841 | |
842 | err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX, |
843 | rtm_ipv4_policy, extack); |
844 | if (err < 0) |
845 | return err; |
846 | |
847 | for (i = 0; i <= RTA_MAX; ++i) { |
848 | int ifindex; |
849 | |
850 | if (!tb[i]) |
851 | continue; |
852 | |
853 | switch (i) { |
854 | case RTA_TABLE: |
855 | filter->table_id = nla_get_u32(tb[i]); |
856 | break; |
857 | case RTA_OIF: |
858 | ifindex = nla_get_u32(tb[i]); |
859 | filter->dev = __dev_get_by_index(net, ifindex); |
860 | if (!filter->dev) |
861 | return -ENODEV; |
862 | break; |
863 | default: |
864 | NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request" ); |
865 | return -EINVAL; |
866 | } |
867 | } |
868 | |
869 | if (filter->flags || filter->protocol || filter->rt_type || |
870 | filter->table_id || filter->dev) { |
871 | filter->filter_set = 1; |
872 | cb->answer_flags = NLM_F_DUMP_FILTERED; |
873 | } |
874 | |
875 | return 0; |
876 | } |
877 | EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req); |
878 | |
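/* Dump the IPv4 FIB to userspace: a single table if one was requested via
 * the filter, otherwise every table in the namespace.
 */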
879 | static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) |
880 | { |
881 | const struct nlmsghdr *nlh = cb->nlh; |
882 | struct net *net = sock_net(skb->sk); |
883 | struct fib_dump_filter filter = {}; |
884 | unsigned int h, s_h; |
885 | unsigned int e = 0, s_e; |
886 | struct fib_table *tb; |
887 | struct hlist_head *head; |
888 | int dumped = 0, err; |
889 | |
890 | if (cb->strict_check) { |
891 | err = ip_valid_fib_dump_req(net, nlh, &filter, cb); |
892 | if (err < 0) |
893 | return err; |
894 | } else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) { |
895 | struct rtmsg *rtm = nlmsg_data(nlh); |
896 | |
897 | filter.flags = rtm->rtm_flags & (RTM_F_PREFIX | RTM_F_CLONED); |
898 | } |
899 | |
900 | /* fib entries are never clones and ipv4 does not use prefix flag */ |
901 | if (filter.flags & (RTM_F_PREFIX | RTM_F_CLONED)) |
902 | return skb->len; |
903 | |
904 | if (filter.table_id) { |
905 | tb = fib_get_table(net, filter.table_id); |
906 | if (!tb) { |
907 | if (filter.dump_all_families) |
908 | return skb->len; |
909 | |
			NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
911 | return -ENOENT; |
912 | } |
913 | |
914 | err = fib_table_dump(tb, skb, cb, &filter); |
915 | return skb->len ? : err; |
916 | } |
917 | |
918 | s_h = cb->args[0]; |
919 | s_e = cb->args[1]; |
920 | |
921 | rcu_read_lock(); |
922 | |
923 | for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { |
924 | e = 0; |
925 | head = &net->ipv4.fib_table_hash[h]; |
926 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
927 | if (e < s_e) |
928 | goto next; |
929 | if (dumped) |
930 | memset(&cb->args[2], 0, sizeof(cb->args) - |
931 | 2 * sizeof(cb->args[0])); |
932 | err = fib_table_dump(tb, skb, cb, &filter); |
933 | if (err < 0) { |
934 | if (likely(skb->len)) |
935 | goto out; |
936 | |
937 | goto out_err; |
938 | } |
939 | dumped = 1; |
940 | next: |
941 | e++; |
942 | } |
943 | } |
944 | out: |
945 | err = skb->len; |
946 | out_err: |
947 | rcu_read_unlock(); |
948 | |
949 | cb->args[1] = e; |
950 | cb->args[0] = h; |
951 | |
952 | return err; |
953 | } |
954 | |
/* Prepare and feed an intra-kernel routing request.
 * Really, it should be a netlink message, but netlink may not be
 * configured, so we feed the request directly to the FIB engine.
 * This is legal because all such events occur only while netlink
 * is already locked.
 */
961 | static void fib_magic(int cmd, int type, __be32 dst, int dst_len, |
962 | struct in_ifaddr *ifa, u32 rt_priority) |
963 | { |
964 | struct net *net = dev_net(ifa->ifa_dev->dev); |
965 | u32 tb_id = l3mdev_fib_table(ifa->ifa_dev->dev); |
966 | struct fib_table *tb; |
967 | struct fib_config cfg = { |
968 | .fc_protocol = RTPROT_KERNEL, |
969 | .fc_type = type, |
970 | .fc_dst = dst, |
971 | .fc_dst_len = dst_len, |
972 | .fc_priority = rt_priority, |
973 | .fc_prefsrc = ifa->ifa_local, |
974 | .fc_oif = ifa->ifa_dev->dev->ifindex, |
975 | .fc_nlflags = NLM_F_CREATE | NLM_F_APPEND, |
976 | .fc_nlinfo = { |
977 | .nl_net = net, |
978 | }, |
979 | }; |
980 | |
981 | if (!tb_id) |
982 | tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL; |
983 | |
984 | tb = fib_new_table(net, tb_id); |
985 | if (!tb) |
986 | return; |
987 | |
988 | cfg.fc_table = tb->tb_id; |
989 | |
990 | if (type != RTN_LOCAL) |
991 | cfg.fc_scope = RT_SCOPE_LINK; |
992 | else |
993 | cfg.fc_scope = RT_SCOPE_HOST; |
994 | |
995 | if (cmd == RTM_NEWROUTE) |
996 | fib_table_insert(net, tb, &cfg, NULL); |
997 | else |
998 | fib_table_delete(net, tb, &cfg, NULL); |
999 | } |
1000 | |
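/* Install the routes implied by a newly added interface address: the
 * local /32 route, the broadcast routes and, unless disabled, the prefix
 * route.
 */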
1001 | void fib_add_ifaddr(struct in_ifaddr *ifa) |
1002 | { |
1003 | struct in_device *in_dev = ifa->ifa_dev; |
1004 | struct net_device *dev = in_dev->dev; |
1005 | struct in_ifaddr *prim = ifa; |
1006 | __be32 mask = ifa->ifa_mask; |
1007 | __be32 addr = ifa->ifa_local; |
1008 | __be32 prefix = ifa->ifa_address & mask; |
1009 | |
1010 | if (ifa->ifa_flags & IFA_F_SECONDARY) { |
1011 | prim = inet_ifa_byprefix(in_dev, prefix, mask); |
1012 | if (!prim) { |
1013 | pr_warn("%s: bug: prim == NULL\n" , __func__); |
1014 | return; |
1015 | } |
1016 | } |
1017 | |
1018 | fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim, 0); |
1019 | |
1020 | if (!(dev->flags & IFF_UP)) |
1021 | return; |
1022 | |
1023 | /* Add broadcast address, if it is explicitly assigned. */ |
1024 | if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF)) |
1025 | fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, |
1026 | prim, 0); |
1027 | |
1028 | if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) && |
1029 | (prefix != addr || ifa->ifa_prefixlen < 32)) { |
1030 | if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE)) |
1031 | fib_magic(RTM_NEWROUTE, |
1032 | dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, |
1033 | prefix, ifa->ifa_prefixlen, prim, |
1034 | ifa->ifa_rt_priority); |
1035 | |
		/* Add network-specific broadcasts, when they make sense */
1037 | if (ifa->ifa_prefixlen < 31) { |
1038 | fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, |
1039 | prim, 0); |
1040 | fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask, |
1041 | 32, prim, 0); |
1042 | } |
1043 | } |
1044 | } |
1045 | |
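/* Change the metric of the prefix route for @ifa: the route with the new
 * metric is added before the old one is deleted.
 */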
1046 | void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric) |
1047 | { |
1048 | __be32 prefix = ifa->ifa_address & ifa->ifa_mask; |
1049 | struct in_device *in_dev = ifa->ifa_dev; |
1050 | struct net_device *dev = in_dev->dev; |
1051 | |
1052 | if (!(dev->flags & IFF_UP) || |
1053 | ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) || |
1054 | ipv4_is_zeronet(prefix) || |
1055 | prefix == ifa->ifa_local || ifa->ifa_prefixlen == 32) |
1056 | return; |
1057 | |
1058 | /* add the new */ |
1059 | fib_magic(RTM_NEWROUTE, |
1060 | dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, |
1061 | prefix, ifa->ifa_prefixlen, ifa, new_metric); |
1062 | |
1063 | /* delete the old */ |
1064 | fib_magic(RTM_DELROUTE, |
1065 | dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, |
1066 | prefix, ifa->ifa_prefixlen, ifa, ifa->ifa_rt_priority); |
1067 | } |
1068 | |
/* Delete a primary or secondary address.
 * Optionally, on secondary address promotion consider the addresses from
 * the subnet of iprim as deleted, even if they are still in the device
 * list. In this case the secondary ifa can be in the device list.
 */
1074 | void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) |
1075 | { |
1076 | struct in_device *in_dev = ifa->ifa_dev; |
1077 | struct net_device *dev = in_dev->dev; |
1078 | struct in_ifaddr *ifa1; |
1079 | struct in_ifaddr *prim = ifa, *prim1 = NULL; |
1080 | __be32 brd = ifa->ifa_address | ~ifa->ifa_mask; |
1081 | __be32 any = ifa->ifa_address & ifa->ifa_mask; |
1082 | #define LOCAL_OK 1 |
1083 | #define BRD_OK 2 |
1084 | #define BRD0_OK 4 |
1085 | #define BRD1_OK 8 |
1086 | unsigned int ok = 0; |
1087 | int subnet = 0; /* Primary network */ |
1088 | int gone = 1; /* Address is missing */ |
1089 | int same_prefsrc = 0; /* Another primary with same IP */ |
1090 | |
1091 | if (ifa->ifa_flags & IFA_F_SECONDARY) { |
1092 | prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); |
1093 | if (!prim) { |
1094 | /* if the device has been deleted, we don't perform |
1095 | * address promotion |
1096 | */ |
1097 | if (!in_dev->dead) |
1098 | pr_warn("%s: bug: prim == NULL\n" , __func__); |
1099 | return; |
1100 | } |
1101 | if (iprim && iprim != prim) { |
1102 | pr_warn("%s: bug: iprim != prim\n" , __func__); |
1103 | return; |
1104 | } |
1105 | } else if (!ipv4_is_zeronet(any) && |
1106 | (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) { |
1107 | if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE)) |
1108 | fib_magic(RTM_DELROUTE, |
1109 | dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, |
1110 | any, ifa->ifa_prefixlen, prim, 0); |
1111 | subnet = 1; |
1112 | } |
1113 | |
1114 | if (in_dev->dead) |
1115 | goto no_promotions; |
1116 | |
	/* Deletion is more complicated than adding.
	 * We must take care not to delete too much.
	 *
	 * Scan the address list to be sure that addresses are really gone.
	 */
1122 | |
1123 | for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { |
1124 | if (ifa1 == ifa) { |
1125 | /* promotion, keep the IP */ |
1126 | gone = 0; |
1127 | continue; |
1128 | } |
1129 | /* Ignore IFAs from our subnet */ |
1130 | if (iprim && ifa1->ifa_mask == iprim->ifa_mask && |
1131 | inet_ifa_match(ifa1->ifa_address, iprim)) |
1132 | continue; |
1133 | |
1134 | /* Ignore ifa1 if it uses different primary IP (prefsrc) */ |
1135 | if (ifa1->ifa_flags & IFA_F_SECONDARY) { |
1136 | /* Another address from our subnet? */ |
1137 | if (ifa1->ifa_mask == prim->ifa_mask && |
1138 | inet_ifa_match(ifa1->ifa_address, prim)) |
1139 | prim1 = prim; |
1140 | else { |
1141 | /* We reached the secondaries, so |
1142 | * same_prefsrc should be determined. |
1143 | */ |
1144 | if (!same_prefsrc) |
1145 | continue; |
1146 | /* Search new prim1 if ifa1 is not |
1147 | * using the current prim1 |
1148 | */ |
1149 | if (!prim1 || |
1150 | ifa1->ifa_mask != prim1->ifa_mask || |
1151 | !inet_ifa_match(ifa1->ifa_address, prim1)) |
1152 | prim1 = inet_ifa_byprefix(in_dev, |
1153 | ifa1->ifa_address, |
1154 | ifa1->ifa_mask); |
1155 | if (!prim1) |
1156 | continue; |
1157 | if (prim1->ifa_local != prim->ifa_local) |
1158 | continue; |
1159 | } |
1160 | } else { |
1161 | if (prim->ifa_local != ifa1->ifa_local) |
1162 | continue; |
1163 | prim1 = ifa1; |
1164 | if (prim != prim1) |
1165 | same_prefsrc = 1; |
1166 | } |
1167 | if (ifa->ifa_local == ifa1->ifa_local) |
1168 | ok |= LOCAL_OK; |
1169 | if (ifa->ifa_broadcast == ifa1->ifa_broadcast) |
1170 | ok |= BRD_OK; |
1171 | if (brd == ifa1->ifa_broadcast) |
1172 | ok |= BRD1_OK; |
1173 | if (any == ifa1->ifa_broadcast) |
1174 | ok |= BRD0_OK; |
1175 | /* primary has network specific broadcasts */ |
1176 | if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) { |
1177 | __be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask; |
1178 | __be32 any1 = ifa1->ifa_address & ifa1->ifa_mask; |
1179 | |
1180 | if (!ipv4_is_zeronet(any1)) { |
1181 | if (ifa->ifa_broadcast == brd1 || |
1182 | ifa->ifa_broadcast == any1) |
1183 | ok |= BRD_OK; |
1184 | if (brd == brd1 || brd == any1) |
1185 | ok |= BRD1_OK; |
1186 | if (any == brd1 || any == any1) |
1187 | ok |= BRD0_OK; |
1188 | } |
1189 | } |
1190 | } |
1191 | |
1192 | no_promotions: |
1193 | if (!(ok & BRD_OK)) |
1194 | fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, |
1195 | prim, 0); |
1196 | if (subnet && ifa->ifa_prefixlen < 31) { |
1197 | if (!(ok & BRD1_OK)) |
1198 | fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, |
1199 | prim, 0); |
1200 | if (!(ok & BRD0_OK)) |
1201 | fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, |
1202 | prim, 0); |
1203 | } |
1204 | if (!(ok & LOCAL_OK)) { |
1205 | unsigned int addr_type; |
1206 | |
1207 | fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim, 0); |
1208 | |
		/* Check that this local address finally disappeared. */
1210 | addr_type = inet_addr_type_dev_table(dev_net(dev), dev, |
1211 | ifa->ifa_local); |
1212 | if (gone && addr_type != RTN_LOCAL) { |
			/* Last but not least, we must flush stray FIB entries.
			 *
			 * First we scan the fib_info list for stray nexthop
			 * entries, then trigger fib_flush.
			 */
1219 | if (fib_sync_down_addr(dev, ifa->ifa_local)) |
1220 | fib_flush(dev_net(dev)); |
1221 | } |
1222 | } |
1223 | #undef LOCAL_OK |
1224 | #undef BRD_OK |
1225 | #undef BRD0_OK |
1226 | #undef BRD1_OK |
1227 | } |
1228 | |
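/* Perform a FIB lookup on behalf of a NETLINK_FIB_LOOKUP request and store
 * the result back into @frn.
 */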
1229 | static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn) |
1230 | { |
1231 | |
1232 | struct fib_result res; |
1233 | struct flowi4 fl4 = { |
1234 | .flowi4_mark = frn->fl_mark, |
1235 | .daddr = frn->fl_addr, |
1236 | .flowi4_tos = frn->fl_tos, |
1237 | .flowi4_scope = frn->fl_scope, |
1238 | }; |
1239 | struct fib_table *tb; |
1240 | |
1241 | rcu_read_lock(); |
1242 | |
1243 | tb = fib_get_table(net, frn->tb_id_in); |
1244 | |
1245 | frn->err = -ENOENT; |
1246 | if (tb) { |
1247 | local_bh_disable(); |
1248 | |
1249 | frn->tb_id = tb->tb_id; |
1250 | frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); |
1251 | |
1252 | if (!frn->err) { |
1253 | frn->prefixlen = res.prefixlen; |
1254 | frn->nh_sel = res.nh_sel; |
1255 | frn->type = res.type; |
1256 | frn->scope = res.scope; |
1257 | } |
1258 | local_bh_enable(); |
1259 | } |
1260 | |
1261 | rcu_read_unlock(); |
1262 | } |
1263 | |
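/* Handle a NETLINK_FIB_LOOKUP message: run the lookup and unicast the
 * result back to the sender.
 */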
1264 | static void nl_fib_input(struct sk_buff *skb) |
1265 | { |
1266 | struct net *net; |
1267 | struct fib_result_nl *frn; |
1268 | struct nlmsghdr *nlh; |
1269 | u32 portid; |
1270 | |
1271 | net = sock_net(skb->sk); |
1272 | nlh = nlmsg_hdr(skb); |
1273 | if (skb->len < nlmsg_total_size(sizeof(*frn)) || |
1274 | skb->len < nlh->nlmsg_len || |
1275 | nlmsg_len(nlh) < sizeof(*frn)) |
1276 | return; |
1277 | |
1278 | skb = netlink_skb_clone(skb, GFP_KERNEL); |
1279 | if (!skb) |
1280 | return; |
1281 | nlh = nlmsg_hdr(skb); |
1282 | |
1283 | frn = (struct fib_result_nl *) nlmsg_data(nlh); |
1284 | nl_fib_lookup(net, frn); |
1285 | |
1286 | portid = NETLINK_CB(skb).portid; /* netlink portid */ |
1287 | NETLINK_CB(skb).portid = 0; /* from kernel */ |
1288 | NETLINK_CB(skb).dst_group = 0; /* unicast */ |
1289 | netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT); |
1290 | } |
1291 | |
1292 | static int __net_init nl_fib_lookup_init(struct net *net) |
1293 | { |
1294 | struct sock *sk; |
1295 | struct netlink_kernel_cfg cfg = { |
1296 | .input = nl_fib_input, |
1297 | }; |
1298 | |
1299 | sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg); |
1300 | if (!sk) |
1301 | return -EAFNOSUPPORT; |
1302 | net->ipv4.fibnl = sk; |
1303 | return 0; |
1304 | } |
1305 | |
1306 | static void nl_fib_lookup_exit(struct net *net) |
1307 | { |
1308 | netlink_kernel_release(net->ipv4.fibnl); |
1309 | net->ipv4.fibnl = NULL; |
1310 | } |
1311 | |
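/* Take @dev out of the IPv4 FIB: bring its routes down, flush caches and
 * tear down its ARP entries.
 */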
1312 | static void fib_disable_ip(struct net_device *dev, unsigned long event, |
1313 | bool force) |
1314 | { |
1315 | if (fib_sync_down_dev(dev, event, force)) |
1316 | fib_flush(dev_net(dev)); |
1317 | else |
1318 | rt_cache_flush(dev_net(dev)); |
1319 | arp_ifdown(dev); |
1320 | } |
1321 | |
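/* inetaddr notifier: keep the FIB in sync when addresses are added to or
 * removed from an interface.
 */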
1322 | static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) |
1323 | { |
1324 | struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; |
1325 | struct net_device *dev = ifa->ifa_dev->dev; |
1326 | struct net *net = dev_net(dev); |
1327 | |
1328 | switch (event) { |
1329 | case NETDEV_UP: |
1330 | fib_add_ifaddr(ifa); |
1331 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
1332 | fib_sync_up(dev, RTNH_F_DEAD); |
1333 | #endif |
1334 | atomic_inc(&net->ipv4.dev_addr_genid); |
1335 | rt_cache_flush(dev_net(dev)); |
1336 | break; |
1337 | case NETDEV_DOWN: |
1338 | fib_del_ifaddr(ifa, NULL); |
1339 | atomic_inc(&net->ipv4.dev_addr_genid); |
1340 | if (!ifa->ifa_dev->ifa_list) { |
1341 | /* Last address was deleted from this interface. |
1342 | * Disable IP. |
1343 | */ |
1344 | fib_disable_ip(dev, event, true); |
1345 | } else { |
1346 | rt_cache_flush(dev_net(dev)); |
1347 | } |
1348 | break; |
1349 | } |
1350 | return NOTIFY_DONE; |
1351 | } |
1352 | |
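/* netdevice notifier: keep the FIB in sync with interface state changes
 * (up/down, unregister, MTU change, enslavement to an L3 master device).
 */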
1353 | static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) |
1354 | { |
1355 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
1356 | struct netdev_notifier_changeupper_info *upper_info = ptr; |
1357 | struct netdev_notifier_info_ext *info_ext = ptr; |
1358 | struct in_device *in_dev; |
1359 | struct net *net = dev_net(dev); |
1360 | unsigned int flags; |
1361 | |
1362 | if (event == NETDEV_UNREGISTER) { |
1363 | fib_disable_ip(dev, event, true); |
1364 | rt_flush_dev(dev); |
1365 | return NOTIFY_DONE; |
1366 | } |
1367 | |
1368 | in_dev = __in_dev_get_rtnl(dev); |
1369 | if (!in_dev) |
1370 | return NOTIFY_DONE; |
1371 | |
1372 | switch (event) { |
1373 | case NETDEV_UP: |
1374 | for_ifa(in_dev) { |
1375 | fib_add_ifaddr(ifa); |
1376 | } endfor_ifa(in_dev); |
1377 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
1378 | fib_sync_up(dev, RTNH_F_DEAD); |
1379 | #endif |
1380 | atomic_inc(&net->ipv4.dev_addr_genid); |
1381 | rt_cache_flush(net); |
1382 | break; |
1383 | case NETDEV_DOWN: |
1384 | fib_disable_ip(dev, event, false); |
1385 | break; |
1386 | case NETDEV_CHANGE: |
1387 | flags = dev_get_flags(dev); |
1388 | if (flags & (IFF_RUNNING | IFF_LOWER_UP)) |
1389 | fib_sync_up(dev, RTNH_F_LINKDOWN); |
1390 | else |
1391 | fib_sync_down_dev(dev, event, false); |
1392 | rt_cache_flush(net); |
1393 | break; |
1394 | case NETDEV_CHANGEMTU: |
1395 | fib_sync_mtu(dev, info_ext->ext.mtu); |
1396 | rt_cache_flush(net); |
1397 | break; |
1398 | case NETDEV_CHANGEUPPER: |
1399 | upper_info = ptr; |
1400 | /* flush all routes if dev is linked to or unlinked from |
1401 | * an L3 master device (e.g., VRF) |
1402 | */ |
1403 | if (upper_info->upper_dev && |
1404 | netif_is_l3_master(upper_info->upper_dev)) |
1405 | fib_disable_ip(dev, NETDEV_DOWN, true); |
1406 | break; |
1407 | } |
1408 | return NOTIFY_DONE; |
1409 | } |
1410 | |
1411 | static struct notifier_block fib_inetaddr_notifier = { |
1412 | .notifier_call = fib_inetaddr_event, |
1413 | }; |
1414 | |
1415 | static struct notifier_block fib_netdev_notifier = { |
1416 | .notifier_call = fib_netdev_event, |
1417 | }; |
1418 | |
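/* Per-namespace FIB setup: register the FIB notifier, allocate the table
 * hash and initialize the default rules/tables.
 */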
1419 | static int __net_init ip_fib_net_init(struct net *net) |
1420 | { |
1421 | int err; |
1422 | size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ; |
1423 | |
1424 | err = fib4_notifier_init(net); |
1425 | if (err) |
1426 | return err; |
1427 | |
	/* Avoid false sharing: use at least a full cache line */
1429 | size = max_t(size_t, size, L1_CACHE_BYTES); |
1430 | |
1431 | net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL); |
1432 | if (!net->ipv4.fib_table_hash) { |
1433 | err = -ENOMEM; |
1434 | goto err_table_hash_alloc; |
1435 | } |
1436 | |
1437 | err = fib4_rules_init(net); |
1438 | if (err < 0) |
1439 | goto err_rules_init; |
1440 | return 0; |
1441 | |
1442 | err_rules_init: |
1443 | kfree(net->ipv4.fib_table_hash); |
1444 | err_table_hash_alloc: |
1445 | fib4_notifier_exit(net); |
1446 | return err; |
1447 | } |
1448 | |
1449 | static void ip_fib_net_exit(struct net *net) |
1450 | { |
1451 | int i; |
1452 | |
1453 | rtnl_lock(); |
1454 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
1455 | RCU_INIT_POINTER(net->ipv4.fib_main, NULL); |
1456 | RCU_INIT_POINTER(net->ipv4.fib_default, NULL); |
1457 | #endif |
1458 | /* Destroy the tables in reverse order to guarantee that the |
1459 | * local table, ID 255, is destroyed before the main table, ID |
1460 | * 254. This is necessary as the local table may contain |
1461 | * references to data contained in the main table. |
1462 | */ |
1463 | for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) { |
1464 | struct hlist_head *head = &net->ipv4.fib_table_hash[i]; |
1465 | struct hlist_node *tmp; |
1466 | struct fib_table *tb; |
1467 | |
1468 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { |
1469 | hlist_del(&tb->tb_hlist); |
1470 | fib_table_flush(net, tb, true); |
1471 | fib_free_table(tb); |
1472 | } |
1473 | } |
1474 | |
1475 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
1476 | fib4_rules_exit(net); |
1477 | #endif |
1478 | rtnl_unlock(); |
1479 | kfree(net->ipv4.fib_table_hash); |
1480 | fib4_notifier_exit(net); |
1481 | } |
1482 | |
1483 | static int __net_init fib_net_init(struct net *net) |
1484 | { |
1485 | int error; |
1486 | |
1487 | #ifdef CONFIG_IP_ROUTE_CLASSID |
1488 | net->ipv4.fib_num_tclassid_users = 0; |
1489 | #endif |
1490 | error = ip_fib_net_init(net); |
1491 | if (error < 0) |
1492 | goto out; |
1493 | error = nl_fib_lookup_init(net); |
1494 | if (error < 0) |
1495 | goto out_nlfl; |
1496 | error = fib_proc_init(net); |
1497 | if (error < 0) |
1498 | goto out_proc; |
1499 | out: |
1500 | return error; |
1501 | |
1502 | out_proc: |
1503 | nl_fib_lookup_exit(net); |
1504 | out_nlfl: |
1505 | ip_fib_net_exit(net); |
1506 | goto out; |
1507 | } |
1508 | |
1509 | static void __net_exit fib_net_exit(struct net *net) |
1510 | { |
1511 | fib_proc_exit(net); |
1512 | nl_fib_lookup_exit(net); |
1513 | ip_fib_net_exit(net); |
1514 | } |
1515 | |
1516 | static struct pernet_operations fib_net_ops = { |
1517 | .init = fib_net_init, |
1518 | .exit = fib_net_exit, |
1519 | }; |
1520 | |
1521 | void __init ip_fib_init(void) |
1522 | { |
1523 | fib_trie_init(); |
1524 | |
1525 | register_pernet_subsys(&fib_net_ops); |
1526 | |
1527 | register_netdevice_notifier(&fib_netdev_notifier); |
1528 | register_inetaddr_notifier(&fib_inetaddr_notifier); |
1529 | |
1530 | rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0); |
1531 | rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0); |
1532 | rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, 0); |
1533 | } |
1534 | |