/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/netlink.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/fib_notifier.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize away the dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
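
/* Usage sketch (illustrative only, not part of the original source):
 * for_nexthops()/change_nexthops() open a scope that endfor_nexthops()
 * closes, so a walk over a fib_info's nexthops looks like
 *
 *	int dead_nhs = 0;		// dead_nhs is a hypothetical local
 *
 *	for_nexthops(fi) {
 *		if (nh->nh_flags & RTNH_F_DEAD)
 *			dead_nhs++;
 *	} endfor_nexthops(fi);
 *
 * With CONFIG_IP_ROUTE_MULTIPATH disabled the loop body runs exactly once,
 * for the single nexthop.
 */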

const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};
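
/* Illustrative note (not in the original source): fib_props drives both
 * lookup results and route validation.  A lookup that matches an
 * RTN_BLACKHOLE route fails with fib_props[RTN_BLACKHOLE].error, i.e.
 * -EINVAL, while RTN_THROW yields -EAGAIN so fib-rule processing can fall
 * through to the next table.  The scope field is the widest scope a route
 * of that type may be configured with; fib_create_info() below rejects a
 * config whose scope is wider (numerically smaller) than the one listed
 * here.
 */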

static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	if (!rt)
		return;

	/* RCU_INIT_POINTER(*rtp, NULL) is not even needed here,
	 * because we waited for an RCU grace period before
	 * free_fib_info_rcu() was called.
	 */

	dst_dev_put(&rt->dst);
	dst_release_immediate(&rt->dst);
}

static void free_nh_exceptions(struct fib_nh *nh)
{
	struct fnhe_hash_bucket *hash;
	int i;

	hash = rcu_dereference_protected(nh->nh_exceptions, 1);
	if (!hash)
		return;
	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);

			rt_fibinfo_free(&fnhe->fnhe_rth_input);
			rt_fibinfo_free(&fnhe->fnhe_rth_output);

			kfree(fnhe);

			fnhe = next;
		}
	}
	kfree(hash);
}

static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	if (!rtp)
		return;

	for_each_possible_cpu(cpu) {
		struct rtable *rt;

		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
		if (rt) {
			dst_dev_put(&rt->dst);
			dst_release_immediate(&rt->dst);
		}
	}
	free_percpu(rtp);
}

/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		lwtstate_put(nexthop_nh->nh_lwtstate);
		free_nh_exceptions(nexthop_nh);
		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
	} endfor_nexthops(fi);

	ip_fib_metrics_put(fi->fib_metrics);

	kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
	change_nexthops(fi) {
		if (nexthop_nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users--;
	} endfor_nexthops(fi);
#endif
	call_rcu(&fi->rcu, free_fib_info_rcu);
}
EXPORT_SYMBOL_GPL(free_fib_info);

void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}

static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    nfi->fib_type == fi->fib_type &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}

/* Check that the gateway is actually configured.
 * Used only by the redirect acceptance routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4) /* RTA_PREFSRC */
			 + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		size_t nh_encapsize = 0;
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* grab encap info */
		for_nexthops(fi) {
			if (nh->nh_lwtstate) {
				/* RTA_ENCAP_TYPE */
				nh_encapsize += lwtunnel_get_encap_size(
						nh->nh_lwtstate);
				/* RTA_ENCAP */
				nh_encapsize += nla_total_size(2);
			}
		} endfor_nexthops(fi);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size((fi->fib_nhs * nhsize) +
					  nh_encapsize);

	}

	return payload;
}

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, const struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}

static int fib_detect_death(struct fib_info *fi, int order,
			    struct fib_info **last_resort, int *last_idx,
			    int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	} else {
		return 0;
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
			      struct netlink_ext_ack *extack)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	if (remaining > 0) {
		NL_SET_ERR_MSG(extack,
			       "Invalid nexthop configuration - extra data after nexthops");
		nhs = 0;
	}

	return nhs;
}

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg,
		       struct netlink_ext_ack *extack)
{
	int ret;

	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining)) {
			NL_SET_ERR_MSG(extack,
				       "Invalid nexthop configuration - extra data after nexthop");
			return -EINVAL;
		}

		if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
			NL_SET_ERR_MSG(extack,
				       "Invalid flags for nexthop - can not contain DEAD or LINKDOWN");
			return -EINVAL;
		}

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
			if (nexthop_nh->nh_tclassid)
				fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
			nla = nla_find(attrs, attrlen, RTA_ENCAP);
			if (nla) {
				struct lwtunnel_state *lwtstate;
				struct nlattr *nla_entype;

				nla_entype = nla_find(attrs, attrlen,
						      RTA_ENCAP_TYPE);
				if (!nla_entype) {
					NL_SET_BAD_ATTR(extack, nla);
					NL_SET_ERR_MSG(extack,
						       "Encap type is missing");
					goto err_inval;
				}

				ret = lwtunnel_build_state(nla_get_u16(
							   nla_entype),
							   nla, AF_INET, cfg,
							   &lwtstate, extack);
				if (ret)
					goto errout;
				nexthop_nh->nh_lwtstate =
					lwtstate_get(lwtstate);
			}
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;

err_inval:
	ret = -EINVAL;

errout:
	return ret;
}

static void fib_rebalance(struct fib_info *fi)
{
	int total;
	int w;
	struct in_device *in_dev;

	if (fi->fib_nhs < 2)
		return;

	total = 0;
	for_nexthops(fi) {
		if (nh->nh_flags & RTNH_F_DEAD)
			continue;

		in_dev = __in_dev_get_rtnl(nh->nh_dev);

		if (in_dev &&
		    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		    nh->nh_flags & RTNH_F_LINKDOWN)
			continue;

		total += nh->nh_weight;
	} endfor_nexthops(fi);

	w = 0;
	change_nexthops(fi) {
		int upper_bound;

		in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);

		if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
			upper_bound = -1;
		} else if (in_dev &&
			   IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
			   nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
			upper_bound = -1;
		} else {
			w += nexthop_nh->nh_weight;
			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
							    total) - 1;
		}

		atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
	} endfor_nexthops(fi);
}
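
/* Worked example (illustrative, not in the original source): with two live
 * nexthops of weights 1 and 3, total = 4, so the cumulative upper bounds
 * computed above become
 *
 *	nh0: ((u64)1 << 31) / 4 - 1 = 0x1fffffff
 *	nh1: ((u64)4 << 31) / 4 - 1 = 0x7fffffff
 *
 * fib_select_multipath() then picks the first nexthop whose upper bound
 * is >= the 31-bit flow hash, giving nh0 roughly 25% and nh1 roughly 75%
 * of flows.
 */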

#else /* CONFIG_IP_ROUTE_MULTIPATH */

#define fib_rebalance(fi) do { } while (0)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

static int fib_encap_match(u16 encap_type,
			   struct nlattr *encap,
			   const struct fib_nh *nh,
			   const struct fib_config *cfg,
			   struct netlink_ext_ack *extack)
{
	struct lwtunnel_state *lwtstate;
	int ret, result = 0;

	if (encap_type == LWTUNNEL_ENCAP_NONE)
		return 0;

	ret = lwtunnel_build_state(encap_type, encap, AF_INET,
				   cfg, &lwtstate, extack);
	if (!ret) {
		result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
		lwtstate_free(lwtstate);
	}

	return result;
}

int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
		 struct netlink_ext_ack *extack)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if (cfg->fc_encap) {
			if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap,
					    fi->fib_nh, cfg, extack))
				return 1;
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow &&
		    cfg->fc_flow != fi->fib_nh->nh_tclassid)
			return 1;
#endif
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (!cfg->fc_mp)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_in_addr(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif
	return 0;
}

bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
{
	struct nlattr *nla;
	int remaining;

	if (!cfg->fc_mx)
		return true;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 fi_val, val;

		if (!type)
			continue;
		if (type > RTAX_MAX)
			return false;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];
			bool ecn_ca = false;

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
		} else {
			if (nla_len(nla) != sizeof(u32))
				return false;
			val = nla_get_u32(nla);
		}

		fi_val = fi->fib_metrics->metrics[type - 1];
		if (type == RTAX_FEATURES)
			fi_val &= ~DST_FEATURE_ECN_CA;

		if (fi_val != val)
			return false;
	}

	return true;
}


/*
 * Picture
 * -------
 *
 * The semantics of nexthops are very messy for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) the gateway must be on-link, but its on-link status may be
 *    established not by an ifaddr, only by a direct route.
 * c) if both gateway and interface are specified, they must not
 *    contradict each other.
 * d) if we use tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size of the code
 * does not increase practically, but it becomes much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
 * which means that gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g., as a by-product it allows
 * independent exterior and interior routing processes to coexist
 * in peace.
 *
 * Normally it looks like the following.
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *		  |
 *		  |-> {link prefix} -> (gw, oif) [scope local]
 *					|
 *					|-> {local prefix} (terminal node)
 */
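
/* A hedged illustration (not part of the original comment): the classic
 * "gateway reachable only via an interface route" setup exercises exactly
 * this recursion (addresses from the 192.0.2.0/24 documentation range):
 *
 *	# link-scope prefix: resolves the gateway to (no gw, eth0)
 *	ip route add 192.0.2.0/24 dev eth0 scope link
 *	# universe-scope prefix: nexthop 192.0.2.1 has narrower scope
 *	ip route add default via 192.0.2.1 dev eth0
 *
 * fib_check_nh() below performs the inner lookup with
 * flowi4_scope = cfg->fc_scope + 1, which is what forces each nexthop to
 * resolve through a strictly narrower-scoped prefix.
 */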
static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh,
			struct netlink_ext_ack *extack)
{
	int err = 0;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {
			unsigned int addr_type;

			if (cfg->fc_scope >= RT_SCOPE_LINK) {
				NL_SET_ERR_MSG(extack,
					       "Nexthop has invalid scope");
				return -EINVAL;
			}
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev) {
				NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
				return -ENODEV;
			}
			if (!(dev->flags & IFF_UP)) {
				NL_SET_ERR_MSG(extack,
					       "Nexthop device is not up");
				return -ENETDOWN;
			}
			addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
			if (addr_type != RTN_UNICAST) {
				NL_SET_ERR_MSG(extack,
					       "Nexthop has invalid gateway");
				return -EINVAL;
			}
			if (!netif_carrier_ok(dev))
				nh->nh_flags |= RTNH_F_LINKDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct fib_table *tbl = NULL;
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
				.flowi4_iif = LOOPBACK_IFINDEX,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;

			if (cfg->fc_table)
				tbl = fib_get_table(net, cfg->fc_table);

			if (tbl)
				err = fib_table_lookup(tbl, &fl4, &res,
						       FIB_LOOKUP_IGNORE_LINKSTATE |
						       FIB_LOOKUP_NOREF);

			/* on error or if no table given do full lookup. This
			 * is needed for example when nexthops are in the local
			 * table rather than the given table
			 */
			if (!tbl || err) {
				err = fib_lookup(net, &fl4, &res,
						 FIB_LOOKUP_IGNORE_LINKSTATE);
			}

			if (err) {
				NL_SET_ERR_MSG(extack,
					       "Nexthop has invalid gateway");
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
			NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
			goto out;
		}
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "No egress device for nexthop gateway");
			goto out;
		}
		dev_hold(dev);
		if (!netif_carrier_ok(dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
			NL_SET_ERR_MSG(extack,
				       "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
			return -EINVAL;
		}
		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (!in_dev)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
			goto out;
		}
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		if (!netif_carrier_ok(nh->nh_dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}

static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}

static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}

__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}

static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
{
	if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
	    fib_prefsrc != cfg->fc_dst) {
		u32 tb_id = cfg->fc_table;
		int rc;

		if (tb_id == RT_TABLE_MAIN)
			tb_id = RT_TABLE_LOCAL;

		rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
					  fib_prefsrc, tb_id);

		if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
			rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
						  fib_prefsrc, RT_TABLE_LOCAL);
		}

		if (rc != RTN_LOCAL)
			return false;
	}
	return true;
}

struct fib_info *fib_create_info(struct fib_config *cfg,
				 struct netlink_ext_ack *extack)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope) {
		NL_SET_ERR_MSG(extack, "Invalid scope");
		goto err_inval;
	}

	if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid rtm_flags - can not contain DEAD or LINKDOWN");
		goto err_inval;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len, extack);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 16;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
	if (!fi)
		goto failure;
	fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
					      cfg->fc_mx_len, extack);
	if (unlikely(IS_ERR(fi->fib_metrics))) {
		err = PTR_ERR(fi->fib_metrics);
		kfree(fi);
		return ERR_PTR(err);
	}

	fib_info_cnt++;
	fi->fib_net = net;
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;
	fi->fib_type = cfg->fc_type;
	fi->fib_tb_id = cfg->fc_table;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
		if (!nexthop_nh->nh_pcpu_rth_output)
			goto failure;
	} endfor_nexthops(fi)

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device index does not match RTA_OIF");
			goto err_inval;
		}
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop gateway does not match RTA_GATEWAY");
			goto err_inval;
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop class id does not match RTA_FLOW");
			goto err_inval;
		}
#endif
#else
		NL_SET_ERR_MSG(extack,
			       "Multipath support not enabled in kernel");
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		if (cfg->fc_encap) {
			struct lwtunnel_state *lwtstate;

			if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE) {
				NL_SET_ERR_MSG(extack,
					       "LWT encap type not specified");
				goto err_inval;
			}
			err = lwtunnel_build_state(cfg->fc_encap_type,
						   cfg->fc_encap, AF_INET, cfg,
						   &lwtstate, extack);
			if (err)
				goto failure;

			nh->nh_lwtstate = lwtstate_get(lwtstate);
		}
		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
		if (nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) {
			NL_SET_ERR_MSG(extack,
				       "Gateway, device and multipath can not be specified for this route type");
			goto err_inval;
		}
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid route type");
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST) {
		NL_SET_ERR_MSG(extack, "Invalid scope");
		goto err_inval;
	}

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1) {
			NL_SET_ERR_MSG(extack,
				       "Route with host scope can not have multiple nexthops");
			goto err_inval;
		}
		if (nh->nh_gw) {
			NL_SET_ERR_MSG(extack,
				       "Route with host scope can not have a gateway");
			goto err_inval;
		}
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (!nh->nh_dev)
			goto failure;
	} else {
		int linkdown = 0;

		change_nexthops(fi) {
			err = fib_check_nh(cfg, nexthop_nh, extack);
			if (err != 0)
				goto failure;
			if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
				linkdown++;
		} endfor_nexthops(fi)
		if (linkdown == fi->fib_nhs)
			fi->fib_flags |= RTNH_F_LINKDOWN;
	}

	if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) {
		NL_SET_ERR_MSG(extack, "Invalid prefsrc address");
		goto err_inval;
	}

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
	} endfor_nexthops(fi)

	fib_rebalance(fi);

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	refcount_set(&fi->fib_clntref, 1);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}

int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw &&
		    nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
			goto nla_put_failure;
		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
			goto nla_put_failure;
		if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
			struct in_device *in_dev;

			rcu_read_lock();
			in_dev = __in_dev_get_rcu(fi->fib_nh->nh_dev);
			if (in_dev &&
			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
				rtm->rtm_flags |= RTNH_F_DEAD;
			rcu_read_unlock();
		}
		if (fi->fib_nh->nh_flags & RTNH_F_OFFLOAD)
			rtm->rtm_flags |= RTNH_F_OFFLOAD;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;
#endif
		if (fi->fib_nh->nh_lwtstate &&
		    lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
			goto nla_put_failure;
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (!rtnh)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			if (nh->nh_flags & RTNH_F_LINKDOWN) {
				struct in_device *in_dev;

				rcu_read_lock();
				in_dev = __in_dev_get_rcu(nh->nh_dev);
				if (in_dev &&
				    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
					rtnh->rtnh_flags |= RTNH_F_DEAD;
				rcu_read_unlock();
			}
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
#endif
			if (nh->nh_lwtstate &&
			    lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
				goto nla_put_failure;

			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
int fib_sync_down_addr(struct net_device *dev, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct net *net = dev_net(dev);
	int tb_id = l3mdev_fib_table(dev);
	struct fib_info *fi;

	if (!fib_info_laddrhash || local == 0)
		return 0;

	hlist_for_each_entry(fi, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net) ||
		    fi->fib_tb_id != tb_id)
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}

static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
				 enum fib_event_type event_type)
{
	struct in_device *in_dev = __in_dev_get_rtnl(fib_nh->nh_dev);
	struct fib_nh_notifier_info info = {
		.fib_nh = fib_nh,
	};

	switch (event_type) {
	case FIB_EVENT_NH_ADD:
		if (fib_nh->nh_flags & RTNH_F_DEAD)
			break;
		if (IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		    fib_nh->nh_flags & RTNH_F_LINKDOWN)
			break;
		return call_fib4_notifiers(dev_net(fib_nh->nh_dev), event_type,
					   &info.info);
	case FIB_EVENT_NH_DEL:
		if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		     fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
		    (fib_nh->nh_flags & RTNH_F_DEAD))
			return call_fib4_notifiers(dev_net(fib_nh->nh_dev),
						   event_type, &info.info);
	default:
		break;
	}

	return NOTIFY_DONE;
}

/* Update the PMTU of exceptions when:
 * - the new MTU of the first hop becomes smaller than the PMTU
 * - the old MTU was the same as the PMTU, and it limited discovery of
 *   larger MTUs on the path. With that limit raised, we can now
 *   discover larger MTUs
 * A special case is locked exceptions, for which the PMTU is smaller
 * than the minimal accepted PMTU:
 * - if the new MTU is greater than the PMTU, don't make any change
 * - otherwise, unlock and set PMTU
 */
static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
{
	struct fnhe_hash_bucket *bucket;
	int i;

	bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
	if (!bucket)
		return;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
		     fnhe;
		     fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
			if (fnhe->fnhe_mtu_locked) {
				if (new <= fnhe->fnhe_pmtu) {
					fnhe->fnhe_pmtu = new;
					fnhe->fnhe_mtu_locked = false;
				}
			} else if (new < fnhe->fnhe_pmtu ||
				   orig == fnhe->fnhe_pmtu) {
				fnhe->fnhe_pmtu = new;
			}
		}
	}
}
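
/* Illustrative example (not in the original source): assume a path with an
 * unlocked exception holding fnhe_pmtu == 1500 because the device MTU was
 * 1500.  Raising the device MTU to 9000 hits the "orig == fnhe_pmtu" case,
 * so the exception is bumped to 9000 and larger path MTUs can be
 * rediscovered.  Dropping the device MTU to 1400 instead hits the
 * "new < fnhe_pmtu" case and the exception shrinks to 1400.
 */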

void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->nh_dev == dev)
			nh_update_mtu(nh, dev->mtu, orig_mtu);
	}
}

/* Event              force Flags           Description
 * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
 * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
 * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
 * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
 */
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				switch (event) {
				case NETDEV_DOWN:
				case NETDEV_UNREGISTER:
					nexthop_nh->nh_flags |= RTNH_F_DEAD;
					/* fall through */
				case NETDEV_CHANGE:
					nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
					break;
				}
				call_fib_nh_notifiers(nexthop_nh,
						      FIB_EVENT_NH_DEL);
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (event == NETDEV_UNREGISTER &&
			    nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				fi->fib_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				fi->fib_flags |= RTNH_F_LINKDOWN;
				break;
			}
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}

/* Must be invoked inside of an RCU protected region. */
static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct hlist_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	u8 slen = 32 - res->prefixlen;
	int order = -1, last_idx = -1;
	struct fib_alias *fa, *fa1 = NULL;
	u32 last_prio = res->fi->fib_priority;
	u8 last_tos = 0;

	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (fa->fa_slen != slen)
			continue;
		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
			continue;
		if (fa->tb_id != tb->tb_id)
			continue;
		if (next_fi->fib_priority > last_prio &&
		    fa->fa_tos == last_tos) {
			if (last_tos)
				continue;
			break;
		}
		if (next_fi->fib_flags & RTNH_F_DEAD)
			continue;
		last_tos = fa->fa_tos;
		last_prio = next_fi->fib_priority;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (!fi) {
			if (next_fi != res->fi)
				break;
			fa1 = fa;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, fa1->fa_default)) {
			fib_result_assign(res, fi);
			fa1->fa_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || !fi) {
		if (fa1)
			fa1->fa_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      fa1->fa_default)) {
		fib_result_assign(res, fi);
		fa1->fa_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	fa1->fa_default = last_idx;
out:
	return;
}

/*
 * A dead device goes up. We wake up dead nexthops.
 * This makes sense only for multipath routes.
 */
int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	if (nh_flags & RTNH_F_DEAD) {
		unsigned int flags = dev_get_flags(dev);

		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			nh_flags |= RTNH_F_LINKDOWN;
	}

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & nh_flags)) {
				alive++;
				continue;
			}
			if (!nexthop_nh->nh_dev ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			nexthop_nh->nh_flags &= ~nh_flags;
			call_fib_nh_notifiers(nexthop_nh, FIB_EVENT_NH_ADD);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~nh_flags;
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static bool fib_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;

	if (nh->nh_scope == RT_SCOPE_LINK) {
		struct neighbour *n;

		rcu_read_lock_bh();

		n = __ipv4_neigh_lookup_noref(nh->nh_dev,
					      (__force u32)nh->nh_gw);
		if (n)
			state = n->nud_state;

		rcu_read_unlock_bh();
	}

	return !!(state & NUD_VALID);
}

void fib_select_multipath(struct fib_result *res, int hash)
{
	struct fib_info *fi = res->fi;
	struct net *net = fi->fib_net;
	bool first = false;

	for_nexthops(fi) {
		if (net->ipv4.sysctl_fib_multipath_use_neigh) {
			if (!fib_good_nh(nh))
				continue;
			if (!first) {
				res->nh_sel = nhsel;
				first = true;
			}
		}

		if (hash > atomic_read(&nh->nh_upper_bound))
			continue;

		res->nh_sel = nhsel;
		return;
	} endfor_nexthops(fi);
}
#endif

void fib_select_path(struct net *net, struct fib_result *res,
		     struct flowi4 *fl4, const struct sk_buff *skb)
{
	if (fl4->flowi4_oif && !(fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF))
		goto check_saddr;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi->fib_nhs > 1) {
		int h = fib_multipath_hash(net, fl4, skb, NULL);

		fib_select_multipath(res, h);
	}
	else
#endif
	if (!res->prefixlen &&
	    res->table->tb_num_default > 1 &&
	    res->type == RTN_UNICAST)
		fib_select_default(fl4, res);

check_saddr:
	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, *res);
}