/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but we really put all sockets
 * except LISTEN ones here.
 */
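/* Each chain is an hlist_nulls list: lockless RCU lookups use the nulls
 * end marker to detect that a socket was moved to another chain (because
 * of SLAB_TYPESAFE_BY_RCU reuse) and restart the walk when the marker does
 * not match the bucket they started from.
 */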
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an FTP server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
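/* Values for tb->fastreuseport below.  Roughly: once set, a later
 * SO_REUSEPORT bind by the same user can skip walking the owners list.
 * ANY means the cached state is valid for any local address; STRICT means
 * it only covers binds to the cached fast_rcv_saddr, so the address must
 * still be compared.
 */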
#define FASTREUSEPORT_ANY	1
#define FASTREUSEPORT_STRICT	2

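/* One inet_bind_bucket exists per (namespace, l3mdev, local port) that has
 * at least one bound socket; "owners" chains those sockets.  The fast*
 * fields cache the reuse/reuseport state of the owners so that most binds
 * do not have to walk the list.
 */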
struct inet_bind_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
	signed char		fastreuse;
	signed char		fastreuseport;
	kuid_t			fastuid;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;
#endif
	__be32			fast_rcv_saddr;
	unsigned short		fast_sk_family;
	bool			fast_ipv6_only;
	struct hlist_node	node;
	struct hlist_head	owners;
};

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/*
 * Sockets can be hashed in established or listening table
 */
struct inet_listen_hashbucket {
	spinlock_t		lock;
	unsigned int		count;
	struct hlist_head	head;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_mask;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct kmem_cache		*bind_bucket_cachep;
	struct inet_bind_hashbucket	*bhash;
	unsigned int			bhash_size;

	/* The 2nd listener table hashed by local port and address */
	unsigned int			lhash2_mask;
	struct inet_listen_hashbucket	*lhash2;

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * might be often dirty.
	 */
	/* All sockets in TCP_LISTEN state will be in listening_hash.
	 * This is the only table where wildcard'd TCP sockets can
	 * exist.  listening_hash is only hashed by local port number.
	 * If lhash2 is initialized, the same socket will also be hashed
	 * to lhash2 by port and address.
	 */
	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
					____cacheline_aligned_in_smp;
};

#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
	hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)

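/* Map a port+address hash (e.g. one computed by ipv4_portaddr_hash() or
 * ipv6_portaddr_hash()) to its lhash2 bucket.
 */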
static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
	return &h->lhash2[hash & h->lhash2_mask];
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

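/* ehash buckets are protected by a separate, smaller array of spinlocks
 * (ehash_locks); several buckets share one lock, selected by the same hash.
 */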
static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	kvfree(hashinfo->ehash_locks);
	hashinfo->ehash_locks = NULL;
}

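/* Compare a socket's bound device against the receiving device, honouring
 * VRF: when CONFIG_NET_L3_MASTER_DEV is enabled, the tcp_l3mdev_accept
 * sysctl decides whether unbound sockets may accept packets that arrived
 * through an L3 master device (sdif != 0).
 */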
static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
					int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
			      struct inet_bind_bucket *tb);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
			       const u32 bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}

/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);
void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
			 unsigned long numentries, int scale,
			 unsigned long low_limit,
			 unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

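/* Find a listening socket for an incoming segment.  Note that hnum is the
 * destination port in *host* byte order, while sport stays in network byte
 * order.  The result is found under RCU and is not reference counted
 * (see __inet_lookup() below).
 */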
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, const __be16 sport,
				    const __be32 daddr,
				    const unsigned short hnum,
				    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		__be32 saddr, __be16 sport,
		__be32 daddr, __be16 dport, int dif, int sdif)
{
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
				      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))		&&	\
	 ((__sk)->sk_addrpair == (__cookie))		&&	\
	 (((__sk)->sk_bound_dev_if == (__dif))		||	\
	  ((__sk)->sk_bound_dev_if == (__sdif)))	&&	\
	 net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const int __name __deprecated __attribute__((unused))

#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))		&&	\
	 ((__sk)->sk_daddr == (__saddr))		&&	\
	 ((__sk)->sk_rcv_saddr == (__daddr))		&&	\
	 (((__sk)->sk_bound_dev_if == (__dif))		||	\
	  ((__sk)->sk_bound_dev_if == (__sdif)))	&&	\
	 net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
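
/* Sketch of how the helpers above are typically used, as in
 * __inet_lookup_established() in net/ipv4/inet_hashtables.c (the variable
 * names here are only illustrative):
 *
 *	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *	INET_ADDR_COOKIE(acookie, saddr, daddr);
 *	...
 *	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 *		if (INET_MATCH(sk, net, acookie, saddr, daddr,
 *			       ports, dif, sdif))
 *			goto found;
 *	}
 */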

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif);

static inline struct sock *
	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif, 0);
}

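/* Full lookup: try the established table first, then fall back to listeners.
 * On return, *refcounted tells the caller whether it owns a reference on the
 * result (established sockets do, listeners found under RCU do not), i.e.
 * whether it must eventually sock_put() the socket.
 */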
static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 struct sk_buff *skb, int doff,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif, const int sdif,
					 bool *refcounted)
{
	u16 hnum = ntohs(dport);
	struct sock *sk;

	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
				       daddr, hnum, dif, sdif);
	*refcounted = true;
	if (sk)
		return sk;
	*refcounted = false;
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
				      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			   dport, dif, 0, &refcounted);

	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

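/* Lookup driven by a received skb: if early demux already attached a socket
 * to the skb, skb_steal_sock() hands it back (and *refcounted is set);
 * otherwise fall back to __inet_lookup() using the addresses from the IP
 * header.
 */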
static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     int doff,
					     const __be16 sport,
					     const __be16 dport,
					     const int sdif,
					     bool *refcounted)
{
	struct sock *sk = skb_steal_sock(skb);
	const struct iphdr *iph = ip_hdr(skb);

	*refcounted = true;
	if (sk)
		return sk;

	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
			     doff, iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb), sdif,
			     refcounted);
}

u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}

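/* Bind-less connect() support: pick a local ephemeral port for sk, using
 * check_established() to reject ports that would collide with an existing
 * (or TIME_WAIT) connection to the same destination, then hash the socket.
 */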
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			struct sock *sk, u32 port_offset,
			int (*check_established)(struct inet_timewait_death_row *,
						 struct sock *, __u16,
						 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk);
#endif /* _INET_HASHTABLES_H */