1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * net/core/dst.c Protocol independent destination cache. |
4 | * |
5 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
6 | * |
7 | */ |
8 | |
9 | #include <linux/bitops.h> |
10 | #include <linux/errno.h> |
11 | #include <linux/init.h> |
12 | #include <linux/kernel.h> |
13 | #include <linux/workqueue.h> |
14 | #include <linux/mm.h> |
15 | #include <linux/module.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/netdevice.h> |
18 | #include <linux/skbuff.h> |
19 | #include <linux/string.h> |
20 | #include <linux/types.h> |
21 | #include <net/net_namespace.h> |
22 | #include <linux/sched.h> |
23 | #include <linux/prefetch.h> |
24 | #include <net/lwtunnel.h> |
25 | #include <net/xfrm.h> |
26 | |
27 | #include <net/dst.h> |
28 | #include <net/dst_metadata.h> |
29 | |
/* Output handler for dead/blackholed routes: silently drop the packet.
 * Installed as dst->output by dst_init() and dst_dev_put().
 * Always returns 0 (the packet is consumed).
 */
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);
36 | |
/* Shared read-only metrics block used by every dst until it does a
 * copy-on-write via dst_cow_metrics_generic().
 */
const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force linker to place this variable
	 * into const section. Otherwise it might end into bss section.
	 * We really want to avoid false sharing on this variable, and catch
	 * any writes on it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);
46 | |
47 | void dst_init(struct dst_entry *dst, struct dst_ops *ops, |
48 | struct net_device *dev, int initial_obsolete, |
49 | unsigned short flags) |
50 | { |
51 | dst->dev = dev; |
52 | netdev_hold(dev, tracker: &dst->dev_tracker, GFP_ATOMIC); |
53 | dst->ops = ops; |
54 | dst_init_metrics(dst, src_metrics: dst_default_metrics.metrics, read_only: true); |
55 | dst->expires = 0UL; |
56 | #ifdef CONFIG_XFRM |
57 | dst->xfrm = NULL; |
58 | #endif |
59 | dst->input = dst_discard; |
60 | dst->output = dst_discard_out; |
61 | dst->error = 0; |
62 | dst->obsolete = initial_obsolete; |
63 | dst->header_len = 0; |
64 | dst->trailer_len = 0; |
65 | #ifdef CONFIG_IP_ROUTE_CLASSID |
66 | dst->tclassid = 0; |
67 | #endif |
68 | dst->lwtstate = NULL; |
69 | rcuref_init(ref: &dst->__rcuref, cnt: 1); |
70 | INIT_LIST_HEAD(list: &dst->rt_uncached); |
71 | dst->__use = 0; |
72 | dst->lastuse = jiffies; |
73 | dst->flags = flags; |
74 | if (!(flags & DST_NOCOUNT)) |
75 | dst_entries_add(dst: ops, val: 1); |
76 | } |
77 | EXPORT_SYMBOL(dst_init); |
78 | |
79 | void *dst_alloc(struct dst_ops *ops, struct net_device *dev, |
80 | int initial_obsolete, unsigned short flags) |
81 | { |
82 | struct dst_entry *dst; |
83 | |
84 | if (ops->gc && |
85 | !(flags & DST_NOCOUNT) && |
86 | dst_entries_get_fast(dst: ops) > ops->gc_thresh) |
87 | ops->gc(ops); |
88 | |
89 | dst = kmem_cache_alloc(cachep: ops->kmem_cachep, GFP_ATOMIC); |
90 | if (!dst) |
91 | return NULL; |
92 | |
93 | dst_init(dst, ops, dev, initial_obsolete, flags); |
94 | |
95 | return dst; |
96 | } |
97 | EXPORT_SYMBOL(dst_alloc); |
98 | |
99 | struct dst_entry *dst_destroy(struct dst_entry * dst) |
100 | { |
101 | struct dst_entry *child = NULL; |
102 | |
103 | smp_rmb(); |
104 | |
105 | #ifdef CONFIG_XFRM |
106 | if (dst->xfrm) { |
107 | struct xfrm_dst *xdst = (struct xfrm_dst *) dst; |
108 | |
109 | child = xdst->child; |
110 | } |
111 | #endif |
112 | if (!(dst->flags & DST_NOCOUNT)) |
113 | dst_entries_add(dst: dst->ops, val: -1); |
114 | |
115 | if (dst->ops->destroy) |
116 | dst->ops->destroy(dst); |
117 | netdev_put(dev: dst->dev, tracker: &dst->dev_tracker); |
118 | |
119 | lwtstate_put(lws: dst->lwtstate); |
120 | |
121 | if (dst->flags & DST_METADATA) |
122 | metadata_dst_free((struct metadata_dst *)dst); |
123 | else |
124 | kmem_cache_free(s: dst->ops->kmem_cachep, objp: dst); |
125 | |
126 | dst = child; |
127 | if (dst) |
128 | dst_release_immediate(dst); |
129 | return NULL; |
130 | } |
131 | EXPORT_SYMBOL(dst_destroy); |
132 | |
133 | static void dst_destroy_rcu(struct rcu_head *head) |
134 | { |
135 | struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head); |
136 | |
137 | dst = dst_destroy(dst); |
138 | } |
139 | |
140 | /* Operations to mark dst as DEAD and clean up the net device referenced |
141 | * by dst: |
142 | * 1. put the dst under blackhole interface and discard all tx/rx packets |
143 | * on this route. |
144 | * 2. release the net_device |
145 | * This function should be called when removing routes from the fib tree |
146 | * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to |
147 | * make the next dst_ops->check() fail. |
148 | */ |
149 | void dst_dev_put(struct dst_entry *dst) |
150 | { |
151 | struct net_device *dev = dst->dev; |
152 | |
153 | dst->obsolete = DST_OBSOLETE_DEAD; |
154 | if (dst->ops->ifdown) |
155 | dst->ops->ifdown(dst, dev); |
156 | dst->input = dst_discard; |
157 | dst->output = dst_discard_out; |
158 | dst->dev = blackhole_netdev; |
159 | netdev_ref_replace(odev: dev, ndev: blackhole_netdev, tracker: &dst->dev_tracker, |
160 | GFP_ATOMIC); |
161 | } |
162 | EXPORT_SYMBOL(dst_dev_put); |
163 | |
164 | void dst_release(struct dst_entry *dst) |
165 | { |
166 | if (dst && rcuref_put(ref: &dst->__rcuref)) |
167 | call_rcu_hurry(head: &dst->rcu_head, func: dst_destroy_rcu); |
168 | } |
169 | EXPORT_SYMBOL(dst_release); |
170 | |
171 | void dst_release_immediate(struct dst_entry *dst) |
172 | { |
173 | if (dst && rcuref_put(ref: &dst->__rcuref)) |
174 | dst_destroy(dst); |
175 | } |
176 | EXPORT_SYMBOL(dst_release_immediate); |
177 | |
178 | u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) |
179 | { |
180 | struct dst_metrics *p = kmalloc(size: sizeof(*p), GFP_ATOMIC); |
181 | |
182 | if (p) { |
183 | struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old); |
184 | unsigned long prev, new; |
185 | |
186 | refcount_set(r: &p->refcnt, n: 1); |
187 | memcpy(p->metrics, old_p->metrics, sizeof(p->metrics)); |
188 | |
189 | new = (unsigned long) p; |
190 | prev = cmpxchg(&dst->_metrics, old, new); |
191 | |
192 | if (prev != old) { |
193 | kfree(objp: p); |
194 | p = (struct dst_metrics *)__DST_METRICS_PTR(prev); |
195 | if (prev & DST_METRICS_READ_ONLY) |
196 | p = NULL; |
197 | } else if (prev & DST_METRICS_REFCOUNTED) { |
198 | if (refcount_dec_and_test(r: &old_p->refcnt)) |
199 | kfree(objp: old_p); |
200 | } |
201 | } |
202 | BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0); |
203 | return (u32 *)p; |
204 | } |
205 | EXPORT_SYMBOL(dst_cow_metrics_generic); |
206 | |
/* Caller asserts that dst_metrics_read_only(dst) is false. */
/* Detach and free a private writable metrics block: atomically swap the
 * shared read-only default metrics back in, and free the old block only
 * if we won the cmpxchg (a concurrent updater that wins instead owns
 * freeing the block it replaced).
 */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
218 | |
/* dst_ops->check for blackhole routes: always invalid, forcing callers
 * to do a fresh route lookup.
 */
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}
223 | |
/* dst_ops->cow_metrics for blackhole routes: metrics are never made
 * writable.
 */
u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}
228 | |
/* dst_ops->neigh_lookup for blackhole routes: there is no neighbour. */
struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return NULL;
}
235 | |
/* dst_ops->update_pmtu for blackhole routes: intentionally a no-op. */
void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);
242 | |
/* dst_ops->redirect for blackhole routes: intentionally a no-op. */
void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_redirect);
248 | |
249 | unsigned int dst_blackhole_mtu(const struct dst_entry *dst) |
250 | { |
251 | unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); |
252 | |
253 | return mtu ? : dst->dev->mtu; |
254 | } |
255 | EXPORT_SYMBOL_GPL(dst_blackhole_mtu); |
256 | |
/* Protocol-independent dst_ops used by metadata dsts: every operation
 * either returns NULL or does nothing.
 */
static struct dst_ops dst_blackhole_ops = {
	.family		= AF_UNSPEC,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};
266 | |
/* Common initialization for a metadata dst: set up the embedded
 * dst_entry with the blackhole ops (unaccounted, DST_METADATA), then
 * zero everything after it — the rest of struct metadata_dst plus the
 * optslen bytes of trailing tunnel options.
 */
static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &dst_blackhole_ops, NULL, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);
	/* Zero from just past the dst_entry to the end of the allocation. */
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}
278 | |
279 | struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type, |
280 | gfp_t flags) |
281 | { |
282 | struct metadata_dst *md_dst; |
283 | |
284 | md_dst = kmalloc(size: sizeof(*md_dst) + optslen, flags); |
285 | if (!md_dst) |
286 | return NULL; |
287 | |
288 | __metadata_dst_init(md_dst, type, optslen); |
289 | |
290 | return md_dst; |
291 | } |
292 | EXPORT_SYMBOL_GPL(metadata_dst_alloc); |
293 | |
294 | void metadata_dst_free(struct metadata_dst *md_dst) |
295 | { |
296 | #ifdef CONFIG_DST_CACHE |
297 | if (md_dst->type == METADATA_IP_TUNNEL) |
298 | dst_cache_destroy(dst_cache: &md_dst->u.tun_info.dst_cache); |
299 | #endif |
300 | if (md_dst->type == METADATA_XFRM) |
301 | dst_release(md_dst->u.xfrm_info.dst_orig); |
302 | kfree(objp: md_dst); |
303 | } |
304 | EXPORT_SYMBOL_GPL(metadata_dst_free); |
305 | |
306 | struct metadata_dst __percpu * |
307 | metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags) |
308 | { |
309 | int cpu; |
310 | struct metadata_dst __percpu *md_dst; |
311 | |
312 | md_dst = __alloc_percpu_gfp(size: sizeof(struct metadata_dst) + optslen, |
313 | align: __alignof__(struct metadata_dst), gfp: flags); |
314 | if (!md_dst) |
315 | return NULL; |
316 | |
317 | for_each_possible_cpu(cpu) |
318 | __metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen); |
319 | |
320 | return md_dst; |
321 | } |
322 | EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu); |
323 | |
324 | void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst) |
325 | { |
326 | int cpu; |
327 | |
328 | for_each_possible_cpu(cpu) { |
329 | struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu); |
330 | |
331 | #ifdef CONFIG_DST_CACHE |
332 | if (one_md_dst->type == METADATA_IP_TUNNEL) |
333 | dst_cache_destroy(dst_cache: &one_md_dst->u.tun_info.dst_cache); |
334 | #endif |
335 | if (one_md_dst->type == METADATA_XFRM) |
336 | dst_release(one_md_dst->u.xfrm_info.dst_orig); |
337 | } |
338 | free_percpu(pdata: md_dst); |
339 | } |
340 | EXPORT_SYMBOL_GPL(metadata_dst_free_percpu); |
341 | |