/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *
 *
 * Fixes:
 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 *					(just stops a compiler warning).
 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *					are junked rather than corrupting things.
 *		Alan Cox	:	Frames to bad broadcast subnets are dumped
 *					We used to process them non broadcast and
 *					boy could that cause havoc.
 *		Alan Cox	:	ip_forward sets the free flag on the
 *					new frame it queues. Still crap because
 *					it copies the frame but at least it
 *					doesn't eat memory too.
 *		Alan Cox	:	Generic queue code and memory fixes.
 *		Fred Van Kempen	:	IP fragment support (borrowed from NET2E)
 *		Gerhard Koerting:	Forward fragmented frames correctly.
 *		Gerhard Koerting:	Fixes to my fix of the above 8-).
 *		Gerhard Koerting:	IP interface addressing fix.
 *		Linus Torvalds	:	More robustness checks
 *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
 *		Alan Cox	:	Save IP header pointer for later
 *		Alan Cox	:	ip option setting
 *		Alan Cox	:	Use ip_tos/ip_ttl settings
 *		Alan Cox	:	Fragmentation bogosity removed
 *					(Thanks to Mark.Bush@prg.ox.ac.uk)
 *		Dmitry Gorodchanin :	Send of a raw packet crash fix.
 *		Alan Cox	:	Silly ip bug when an overlength
 *					fragment turns up. Now frees the
 *					queue.
 *		Linus Torvalds/ :	Memory leakage on fragmentation
 *		Alan Cox	:	handling.
 *		Gerhard Koerting:	Forwarding uses IP priority hints
 *		Teemu Rantanen	:	Fragment problems.
 *		Alan Cox	:	General cleanup, comments and reformat
 *		Alan Cox	:	SNMP statistics
 *		Alan Cox	:	BSD address rule semantics. Also see
 *					UDP as there is a nasty checksum issue
 *					if you do things the wrong way.
 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
 *		Alan Cox	:	IP options adjust sk->priority.
 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 *	Richard Underwood	:	IP multicasting.
 *		Alan Cox	:	Cleaned up multicast handlers.
 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 *		Gunther Mayer	:	Fix the SNMP reporting typo
 *		Alan Cox	:	Always in group 224.0.0.1
 *	Pauline Middelink	:	Fast ip_checksum update when forwarding
 *					Masquerading support.
 *		Alan Cox	:	Multicast loopback error for 224.0.0.1
 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 *		Alan Cox	:	Use notifiers.
 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
 *		Stefan Becker	:	Send out ICMP HOST REDIRECT
 *	Arnt Gulbrandsen	:	ip_build_xmit
 *		Alan Cox	:	Per socket routing cache
 *		Alan Cox	:	Fixed routing cache, added header cache.
 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 *		Alan Cox	:	Incoming IP option handling.
 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 *		Alan Cox	:	Stopped broadcast source route explosions.
 *		Alan Cox	:	Can disable source routing
 *		Takeshi Sone	:	Masquerading didn't work.
 *	Dave Bonn,Alan Cox	:	Faster IP forwarding whenever possible.
 *		Alan Cox	:	Memory leaks, tramples, misc debugging.
 *		Alan Cox	:	Fixed multicast (by popular demand 8))
 *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
 *		Alan Cox	:	Fixed SNMP statistics [I think]
 *	Gerhard Koerting	:	IP fragmentation forwarding fix
 *		Alan Cox	:	Device lock against page fault.
 *		Alan Cox	:	IP_HDRINCL facility.
 *	Werner Almesberger	:	Zero fragment bug
 *		Alan Cox	:	RAW IP frame length bug
 *		Alan Cox	:	Outgoing firewall on build_xmit
 *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
 *		Alan Cox	:	Multicast routing hooks
 *		Jos Vos		:	Do accounting *before* call_in_firewall
 *	Willy Konynenberg	:	Transparent proxying support
 *
 *
 *
 * To Fix:
 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *		and could be made very efficient with the addition of some virtual memory hacks to permit
 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *		Output fragmentation wants updating along with the buffer management to use a single
 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *		fragmentation anyway.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>

/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
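			/* Deliver a clone to every matching socket except
			 * the most recent match; the last one receives the
			 * original skb below, saving one copy.
			 */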
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}
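
/* For reference, a process joins this chain by enabling IP_ROUTER_ALERT on a
 * raw socket (see ip_ra_control()).  A minimal userspace sketch:
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 *
 * Datagrams of that protocol carrying the Router Alert option are then
 * delivered to the socket by ip_call_ra_chain() rather than forwarded.
 */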

void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
	const struct net_protocol *ipprot;
	int raw, ret;

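	/* A protocol handler may return a negative value to request
	 * resubmission with a new protocol number (-ret), as
	 * decapsulating protocols do after stripping their outer header.
	 */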
resubmit:
	raw = raw_local_deliver(skb, protocol);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot) {
		if (!ipprot->no_policy) {
			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				kfree_skb(skb);
				return;
			}
			nf_reset(skb);
		}
		ret = ipprot->handler(skb);
		if (ret < 0) {
			protocol = -ret;
			goto resubmit;
		}
		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_PROT_UNREACH, 0);
			}
			kfree_skb(skb);
		} else {
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
}

static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
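	/* Point skb->data past the IP header (including any options) so
	 * the transport protocol handler starts at its own header.
	 */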
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
	rcu_read_unlock();

	return 0;
}

/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */
	struct net *net = dev_net(skb->dev);

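	/* ip_defrag() returns zero only once the complete datagram has
	 * been reassembled into this skb; a nonzero return means the
	 * fragment was queued (or dropped) and is already consumed.
	 */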
	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
		       net, NULL, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}

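/* Parse and act on any IP options carried by the packet.  Returns true
 * if the packet must be dropped.
 */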
static inline bool ip_rcv_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	const struct iphdr *iph;
	struct net_device *dev = skb->dev;

	/* It looks as overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that combination of IP options
	   and running sniffer is extremely rare condition.
					      --ANK (980813)
	*/
	if (skb_cow(skb, skb_headroom(skb))) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return false;
drop:
	return true;
}

static int ip_rcv_finish_core(struct net *net, struct sock *sk,
			      struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	int (*edemux)(struct sk_buff *skb);
	struct rtable *rt;
	int err;

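	/* Early demux: look up the owning socket before the routing
	 * decision so that a route cached on the socket can be reused.
	 * Fragments are skipped since only the first fragment carries
	 * the transport header the lookup needs.
	 */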
	if (net->ipv4.sysctl_ip_early_demux &&
	    !skb_dst(skb) &&
	    !skb->sk &&
	    !ip_is_fragment(iph)) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
			err = edemux(skb);
			if (unlikely(err))
				goto drop_error;
			/* must reload iph, skb->head might have changed */
			iph = ip_hdr(skb);
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_valid_dst(skb)) {
		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, dev);
		if (unlikely(err))
			goto drop_error;
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

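	/* An IHL above 5 words means the header carries options, which
	 * must be compiled and acted on before delivery or forwarding.
	 */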
	if (iph->ihl > 5 && ip_rcv_options(skb))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
	} else if (skb->pkt_type == PACKET_BROADCAST ||
		   skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		/* RFC 1122 3.3.6:
		 *
		 *   When a host sends a datagram to a link-layer broadcast
		 *   address, the IP destination address MUST be a legal IP
		 *   broadcast or IP multicast address.
		 *
		 *   A host SHOULD silently discard a datagram that is received
		 *   via a link-layer broadcast (see Section 2.4) but does not
		 *   specify an IP multicast or broadcast destination address.
		 *
		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
		 * in a way a form of multicast and the most common use case for
		 * this is 802.11 protecting against cross-station spoofing (the
		 * so-called "hole-196" attack) so do it for both.
		 */
		if (in_dev &&
		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST))
			goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;

drop_error:
	if (err == -EXDEV)
		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
	goto drop;
}

static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	ret = ip_rcv_finish_core(net, sk, skb, dev);
	if (ret != NET_RX_DROP)
		ret = dst_input(skb);
	return ret;
}

/*
 *	Main IP Receive routine.
 */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
	const struct iphdr *iph;
	u32 len;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

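	/* The BUILD_BUG_ONs pin the MIB enum layout so that the two ECN
	 * bits of tos can index the per-codepoint counters directly;
	 * gso_segs makes a GRO-coalesced skb count as the number of
	 * packets it actually carries.
	 */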
	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	__IP_ADD_STATS(net,
		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	IPCB(skb)->iif = skb->skb_iif;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);

	return skb;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return NULL;
}

/*
 *	IP receive entry point
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);

	skb = ip_rcv_core(skb, net);
	if (!skb)
		return NET_RX_DROP;

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);
}

static void ip_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}

static void ip_list_rcv_finish(struct net *net, struct sock *sk,
			       struct list_head *head)
{
	struct dst_entry *curr_dst = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip_rcv(skb);
		if (!skb)
			continue;
		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
			continue;

		dst = skb_dst(skb);
		if (curr_dst != dst) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv_finish(&sublist);
}

static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
			   struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip_rcv_finish);
	ip_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IP packets */
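/*
 * Packets are batched into sublists sharing the same (dev, net) pair
 * here, and further split by dst in ip_list_rcv_finish(), so that each
 * batch traverses the netfilter hooks and routing input path together.
 */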
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip_rcv_core(skb, net);
		if (!skb)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv(&sublist, curr_dev, curr_net);
}