/* SPDX-License-Identifier: GPL-2.0 */
/*
 * INETPEER - A storage for permanent information about peers
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */

#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <linux/atomic.h>

/*
 * IPv4 address key for cache lookups.
 * @addr: peer's IPv4 address, network byte order.
 * @vif:  NOTE(review): presumably an L3 master device / VRF index that
 *        scopes the lookup — confirm against callers of inet_getpeer_v4().
 */
struct ipv4_addr_key {
	__be32 addr;
	int vif;
};

/* Size, in u32 words, of the largest supported key (an IPv6 address). */
#define INETPEER_MAXKEYSZ (sizeof(struct in6_addr) / sizeof(u32))

/*
 * Family-agnostic peer address.  The anonymous union overlays the IPv4
 * key and the IPv6 address; key[] gives a raw word-by-word view of the
 * same bytes, used by inetpeer_addr_cmp() for generic ordering.
 */
struct inetpeer_addr {
	union {
		struct ipv4_addr_key a4;
		struct in6_addr a6;
		u32 key[INETPEER_MAXKEYSZ];
	};
	__u16 family;	/* AF_INET or AF_INET6; selects the union member */
};

/*
 * One cached entry of per-peer state.  Entries are kept in the rb-tree
 * of a struct inet_peer_base and looked up by @daddr.
 */
struct inet_peer {
	struct rb_node rb_node;
	struct inetpeer_addr daddr;	/* lookup key */

	u32 metrics[RTAX_MAX];	/* cached routing metrics */
	u32 rate_tokens; /* rate limiting for ICMP */
	u32 n_redirects;	/* NOTE(review): presumably counts ICMP redirects
				 * sent to this peer — confirm in route.c */
	unsigned long rate_last;	/* jiffies stamp used with rate_tokens */
	/*
	 * Once inet_peer is queued for deletion (refcnt == 0), following field
	 * is not available: rid
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t rid; /* Frag reception counter */
		};
		struct rcu_head rcu;	/* reused for RCU-deferred free */
	};

	/* following fields might be frequently dirtied */
	__u32 dtime; /* the time of last use of not referenced entries */
	refcount_t refcnt;
};

/*
 * Root of one peer cache instance.
 * @rb_root: rb-tree of struct inet_peer, keyed by inet_peer.daddr.
 * @lock:    seqlock protecting the tree.
 * @total:   number of entries (presumably — name only; confirm in inetpeer.c).
 */
struct inet_peer_base {
	struct rb_root rb_root;
	seqlock_t lock;
	int total;
};

/* Initialize an empty struct inet_peer_base. */
void inet_peer_base_init(struct inet_peer_base *);

/* Boot-time setup of the inetpeer subsystem (__init: discarded after boot). */
void inet_initpeers(void) __init;

/* All-ones sentinel marking a metrics[] slot as not yet populated. */
#define INETPEER_METRICS_NEW (~(u32) 0)

73static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
74{
75 iaddr->a4.addr = ip;
76 iaddr->a4.vif = 0;
77 iaddr->family = AF_INET;
78}
79
80static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
81{
82 return iaddr->a4.addr;
83}
84
85static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
86 struct in6_addr *in6)
87{
88 iaddr->a6 = *in6;
89 iaddr->family = AF_INET6;
90}
91
92static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
93{
94 return &iaddr->a6;
95}
96
/*
 * Look up the peer entry for @daddr in @base.  @create presumably
 * controls whether a missing entry is allocated — confirm in inetpeer.c.
 * Can be called with or without local BH being disabled.
 */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create);

102static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
103 __be32 v4daddr,
104 int vif, int create)
105{
106 struct inetpeer_addr daddr;
107
108 daddr.a4.addr = v4daddr;
109 daddr.a4.vif = vif;
110 daddr.family = AF_INET;
111 return inet_getpeer(base, &daddr, create);
112}
113
114static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
115 const struct in6_addr *v6daddr,
116 int create)
117{
118 struct inetpeer_addr daddr;
119
120 daddr.a6 = *v6daddr;
121 daddr.family = AF_INET6;
122 return inet_getpeer(base, &daddr, create);
123}
124
125static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
126 const struct inetpeer_addr *b)
127{
128 int i, n;
129
130 if (a->family == AF_INET)
131 n = sizeof(a->a4) / sizeof(u32);
132 else
133 n = sizeof(a->a6) / sizeof(u32);
134
135 for (i = 0; i < n; i++) {
136 if (a->key[i] == b->key[i])
137 continue;
138 if (a->key[i] < b->key[i])
139 return -1;
140 return 1;
141 }
142
143 return 0;
144}
145
/* can be called from BH context or outside */
/* Drop a reference taken by inet_getpeer(). */
void inet_putpeer(struct inet_peer *p);

/* Token-bucket rate limit check for ICMP; true if sending is allowed. */
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

/* Flush every entry from @base's tree. */
void inetpeer_invalidate_tree(struct inet_peer_base *);

#endif /* _NET_INETPEER_H */