// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK				\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;

	if (attr->max_entries == 0 ||
	    attr->key_size    != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	spin_lock_init(&stab->lock);

	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (!stab->sks) {
		bpf_map_area_free(stab);
		return ERR_PTR(-ENOMEM);
	}

	return &stab->map;
}
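
/* Example (illustration only, not part of this file): the checks in
 * sock_map_alloc() mean user space must create the map with key_size == 4
 * and value_size of 4 or 8. A minimal sketch using libbpf's bpf_map_create()
 * (assuming libbpf v0.7+):
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, "sock_map",
 *				    sizeof(__u32), sizeof(__u64),
 *				    1024, NULL);
 *	if (map_fd < 0)
 *		... errno reflects the -EINVAL/-ENOMEM paths above ...
 */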

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	u32 ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
	fdput(f);
	return ret;
}
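
/* Example (illustration only): this path services BPF_PROG_ATTACH with the
 * map fd as the attach target. A user-space sketch via libbpf, assuming
 * prog_fd holds a loaded stream verdict program:
 *
 *	if (bpf_prog_attach(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0))
 *		... attach failed ...
 */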

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	u32 ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog)) {
		ret = PTR_ERR(prog);
		goto put_map;
	}

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
put_prog:
	bpf_prog_put(prog);
put_map:
	fdput(f);
	return ret;
}

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	release_sock(sk);
}

static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}

static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	bool strp_stop = false, verdict_stop = false;
	struct sk_psock_link *link, *tmp;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct sk_psock_progs *progs = sock_map_progs(map);

			if (psock->saved_data_ready && progs->stream_parser)
				strp_stop = true;
			if (psock->saved_data_ready && progs->stream_verdict)
				verdict_stop = true;
			if (psock->saved_data_ready && progs->skb_verdict)
				verdict_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop || verdict_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		if (strp_stop)
			sk_psock_stop_strp(sk, psock);
		if (verdict_stop)
			sk_psock_stop_verdict(sk, psock);

		if (psock->psock_update_sk_prot)
			psock->psock_update_sk_prot(sk, psock, false);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	if (!sk->sk_prot->psock_update_sk_prot)
		return -EINVAL;
	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}

static int sock_map_link(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog *stream_verdict = NULL;
	struct bpf_prog *stream_parser = NULL;
	struct bpf_prog *skb_verdict = NULL;
	struct bpf_prog *msg_parser = NULL;
	struct sk_psock *psock;
	int ret;

	stream_verdict = READ_ONCE(progs->stream_verdict);
	if (stream_verdict) {
		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
		if (IS_ERR(stream_verdict))
			return PTR_ERR(stream_verdict);
	}

	stream_parser = READ_ONCE(progs->stream_parser);
	if (stream_parser) {
		stream_parser = bpf_prog_inc_not_zero(stream_parser);
		if (IS_ERR(stream_parser)) {
			ret = PTR_ERR(stream_parser);
			goto out_put_stream_verdict;
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out_put_stream_parser;
		}
	}

	skb_verdict = READ_ONCE(progs->skb_verdict);
	if (skb_verdict) {
		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
		if (IS_ERR(skb_verdict)) {
			ret = PTR_ERR(skb_verdict);
			goto out_put_msg_parser;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock)) {
			ret = PTR_ERR(psock);
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);
	if (stream_parser)
		psock_set_prog(&psock->progs.stream_parser, stream_parser);
	if (stream_verdict)
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
	if (skb_verdict)
		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);

	/* msg_* and stream_* program references are tracked in psock after
	 * this point. Reference decrement and cleanup will occur through the
	 * psock destructor.
	 */
	ret = sock_map_init_proto(sk, psock);
	if (ret < 0) {
		sk_psock_put(sk, psock);
		goto out;
	}

	write_lock_bh(&sk->sk_callback_lock);
	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
		ret = sk_psock_init_strp(sk, psock);
		if (ret) {
			write_unlock_bh(&sk->sk_callback_lock);
			sk_psock_put(sk, psock);
			goto out;
		}
		sk_psock_start_strp(sk, psock);
	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_progs:
	if (skb_verdict)
		bpf_prog_put(skb_verdict);
out_put_msg_parser:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out_put_stream_parser:
	if (stream_parser)
		bpf_prog_put(stream_parser);
out_put_stream_verdict:
	if (stream_verdict)
		bpf_prog_put(stream_verdict);
out:
	return ret;
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			sock_hold(sk);
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
			sock_put(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	bpf_map_area_free(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk;
	int err = 0;

	spin_lock_bh(&stab->lock);
	sk = *psk;
	if (!sk_test || sk_test == sk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static long sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}
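
/* Example (illustration only): walking all slots from user space with
 * libbpf's bpf_map_get_next_key(). Passing no key (or an out-of-range one)
 * restarts at index 0, and -ENOENT marks the end of the map:
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);
 *
 *	while (!err) {
 *		key = next;
 *		... look up or delete "key" here ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 */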

static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return sk->sk_state != TCP_LISTEN;
	else
		return sk->sk_state == TCP_ESTABLISHED;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return !!sk->sk_prot->psock_update_sk_prot;
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	return true;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags);

int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	sockfd_put(sock);
	return ret;
}
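
/* Example (illustration only): inserting an established TCP socket from user
 * space. The map value is the socket's file descriptor, widened to the map's
 * value_size; sketch assuming "sock_fd" is a connected socket:
 *
 *	__u32 idx = 0;
 *	__u64 val = sock_fd;
 *
 *	if (bpf_map_update_elem(map_fd, &idx, &val, BPF_ANY))
 *		... fails with -EOPNOTSUPP if the socket state is not allowed ...
 */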

static long sock_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	struct sock *sk = (struct sock *)value;
	int ret;

	if (unlikely(!sk || !sk_fullsock(sk)))
		return -EINVAL;

	if (!sock_map_sk_is_suitable(sk))
		return -EOPNOTSUPP;

	local_bh_disable();
	bh_lock_sock(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	bh_unlock_sock(sk);
	local_bh_enable();
	return ret;
}

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};
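
/* Example (illustration only): a sockops program may call this helper to add
 * the current socket when one of the callbacks accepted by
 * sock_map_op_okay() fires. A minimal BPF-side sketch, assuming a sockmap
 * named "sock_map" is defined elsewhere in the BPF object:
 *
 *	SEC("sockops")
 *	int add_sk(struct bpf_sock_ops *skops)
 *	{
 *		__u32 key = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &key, BPF_ANY);
 *		return 0;
 *	}
 */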

BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
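
/* Example (illustration only): a stream verdict program that redirects every
 * skb to the socket in slot 0 of the same assumed "sock_map". Passing
 * BPF_F_INGRESS as the last argument would queue to that socket's receive
 * path instead of its transmit path:
 *
 *	SEC("sk_skb/stream_verdict")
 *	int verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 */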

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func		= bpf_msg_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
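
/* Example (illustration only): the sendmsg-side twin of the skb helper
 * above, attached as BPF_SK_MSG_VERDICT. Sketch, same assumed map:
 *
 *	SEC("sk_msg")
 *	int msg_verdict(struct sk_msg_md *msg)
 *	{
 *		return bpf_msg_redirect_map(msg, &sock_map, 0, BPF_F_INGRESS);
 *	}
 */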

struct sock_map_seq_info {
	struct bpf_map *map;
	struct sock *sk;
	u32 index;
};

struct bpf_iter__sockmap {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
	if (unlikely(info->index >= info->map->max_entries))
		return NULL;

	info->sk = __sock_map_lookup_elem(info->map, info->index);

	/* can't return sk directly, since that might be NULL */
	return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_map_seq_stop */
	rcu_read_lock();
	return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	++*pos;
	++info->index;

	return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !v);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;
		ctx.sk = info->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_map_seq_show(seq, NULL);

	/* pairs with sock_map_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
	.start	= sock_map_seq_start,
	.next	= sock_map_seq_next,
	.stop	= sock_map_seq_stop,
	.show	= sock_map_seq_show,
};

static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	return 0;
}

static void sock_map_fini_seq_private(void *priv_data)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static u64 sock_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_stab);

	usage += (u64)map->max_entries * sizeof(struct sock *);
	return usage;
}
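
/* Example: for a 1024-entry sockmap on a 64-bit machine, the usage reported
 * above is sizeof(struct bpf_stab) + 1024 * sizeof(struct sock *), i.e. the
 * struct plus 8192 bytes; only the slot array scales with max_entries, the
 * sockets themselves are not accounted here.
 */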

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
	.seq_ops		= &sock_map_seq_ops,
	.init_seq_private	= sock_map_init_seq_private,
	.fini_seq_private	= sock_map_fini_seq_private,
	.seq_priv_size		= sizeof(struct sock_map_seq_info),
};

BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
const struct bpf_map_ops sock_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_map_alloc,
	.map_free		= sock_map_free,
	.map_get_next_key	= sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_map_delete_elem,
	.map_lookup_elem	= sock_map_lookup,
	.map_release_uref	= sock_map_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= sock_map_mem_usage,
	.map_btf_id		= &sock_map_btf_ids[0],
	.iter_seq_info		= &sock_map_iter_seq_info,
};

struct bpf_shtab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_shtab_bucket {
	struct hlist_head head;
	spinlock_t lock;
};

struct bpf_shtab {
	struct bpf_map map;
	struct bpf_shtab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}

static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_shtab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
	struct bpf_shtab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	spin_unlock_bh(&bucket->lock);
}

static long sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	spin_unlock_bh(&bucket->lock);
	return ret;
}

static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
{
	struct bpf_shtab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
				   GFP_ATOMIC | __GFP_NOWARN,
				   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_elem *elem, *elem_new;
	struct bpf_shtab_bucket *bucket;
	struct sk_psock_link *link;
	struct sk_psock *psock;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!elem && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
	if (IS_ERR(elem_new)) {
		ret = PTR_ERR(elem_new);
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, elem_new);
	/* Add new element to the head of the list, so that
	 * concurrent search will find it before old elem.
	 */
	hlist_add_head_rcu(&elem_new->node, &bucket->head);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	spin_unlock_bh(&bucket->lock);
	return 0;
out_unlock:
	spin_unlock_bh(&bucket->lock);
	sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem, *elem_next;
	u32 hash, key_size = map->key_size;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first_elem;
	hash = sock_hash_bucket_hash(key, key_size);
	head = &sock_hash_select_bucket(htab, hash)->head;
	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
	if (!elem)
		goto find_first_elem;

	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
				     struct bpf_shtab_elem, node);
	if (elem_next) {
		memcpy(key_next, elem_next->key, key_size);
		return 0;
	}

	i = hash & (htab->buckets_num - 1);
	i++;
find_first_elem:
	for (; i < htab->buckets_num; i++) {
		head = &sock_hash_select_bucket(htab, i)->head;
		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
					     struct bpf_shtab_elem, node);
		if (elem_next) {
			memcpy(key_next, elem_next->key, key_size);
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_shtab *htab;
	int i, err;

	if (attr->max_entries == 0 ||
	    attr->key_size    == 0 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);
	if (attr->key_size > MAX_BPF_STACK)
		return ERR_PTR(-E2BIG);

	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct bpf_shtab_elem) +
			  round_up(htab->map.key_size, 8);
	if (htab->buckets_num == 0 ||
	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
		err = -EINVAL;
		goto free_htab;
	}

	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
					   sizeof(struct bpf_shtab_bucket),
					   htab->map.numa_node);
	if (!htab->buckets) {
		err = -ENOMEM;
		goto free_htab;
	}

	for (i = 0; i < htab->buckets_num; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	bpf_map_area_free(htab);
	return ERR_PTR(err);
}
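
/* Example (illustration only): unlike sockmap, sockhash accepts keys wider
 * than 4 bytes (any non-zero key_size up to MAX_BPF_STACK). A user-space
 * sketch keyed by a small tuple, assuming libbpf v0.7+:
 *
 *	struct tuple { __u32 ip4; __u16 port; __u16 pad; };
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKHASH, "sock_hash",
 *				    sizeof(struct tuple), sizeof(__u64),
 *				    1024, NULL);
 */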

static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_bucket *bucket;
	struct hlist_head unlink_list;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < htab->buckets_num; i++) {
		bucket = sock_hash_select_bucket(htab, i);

		/* We are racing with sock_hash_delete_from_link to
		 * enter the spin-lock critical section. Every socket on
		 * the list is still linked to sockhash. Since link
		 * exists, psock exists and holds a ref to socket. That
		 * lets us grab a socket ref too.
		 */
		spin_lock_bh(&bucket->lock);
		hlist_for_each_entry(elem, &bucket->head, node)
			sock_hold(elem->sk);
		hlist_move_list(&bucket->head, &unlink_list);
		spin_unlock_bh(&bucket->lock);

		/* Process removed entries out of atomic context to
		 * block for socket lock before deleting the psock's
		 * link to sockhash.
		 */
		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
			hlist_del(&elem->node);
			lock_sock(elem->sk);
			rcu_read_lock();
			sock_map_unref(elem->sk, elem);
			rcu_read_unlock();
			release_sock(elem->sk);
			sock_put(elem->sk);
			sock_hash_free_elem(htab, elem);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(htab->buckets);
	bpf_map_area_free(htab);
}

static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static void *sock_hash_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void sock_hash_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
}

BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_hash_update_common(map, key, sops->sk, flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func		= bpf_sock_hash_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func		= bpf_sk_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func		= bpf_msg_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

struct sock_hash_seq_info {
	struct bpf_map *map;
	struct bpf_shtab *htab;
	u32 bucket_id;
};

static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
				     struct bpf_shtab_elem *prev_elem)
{
	const struct bpf_shtab *htab = info->htab;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;

		/* no more elements, continue in the next bucket */
		info->bucket_id++;
	}

	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
		bucket = &htab->buckets[info->bucket_id];
		node = rcu_dereference(hlist_first_rcu(&bucket->head));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;
	}

	return NULL;
}

static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_hash_seq_stop */
	rcu_read_lock();
	return sock_hash_seq_find_next(info, NULL);
}

static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	++*pos;
	return sock_hash_seq_find_next(info, v);
}

static int sock_hash_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_shtab_elem *elem = v;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !elem);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (elem) {
		ctx.key = elem->key;
		ctx.sk = elem->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_hash_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_hash_seq_show(seq, NULL);

	/* pairs with sock_hash_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_hash_seq_ops = {
	.start	= sock_hash_seq_start,
	.next	= sock_hash_seq_next,
	.stop	= sock_hash_seq_stop,
	.show	= sock_hash_seq_show,
};

static int sock_hash_init_seq_private(void *priv_data,
				      struct bpf_iter_aux_info *aux)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	info->htab = container_of(aux->map, struct bpf_shtab, map);
	return 0;
}

static void sock_hash_fini_seq_private(void *priv_data)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static u64 sock_hash_mem_usage(const struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u64 usage = sizeof(*htab);

	usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
	usage += atomic_read(&htab->count) * (u64)htab->elem_size;
	return usage;
}

static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
	.seq_ops		= &sock_hash_seq_ops,
	.init_seq_private	= sock_hash_init_seq_private,
	.fini_seq_private	= sock_hash_fini_seq_private,
	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
};

BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
const struct bpf_map_ops sock_hash_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_hash_alloc,
	.map_free		= sock_hash_free,
	.map_get_next_key	= sock_hash_get_next_key,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_hash_delete_elem,
	.map_lookup_elem	= sock_hash_lookup,
	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
	.map_release_uref	= sock_hash_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= sock_hash_mem_usage,
	.map_btf_id		= &sock_hash_map_btf_ids[0],
	.iter_seq_info		= &sock_hash_iter_seq_info,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return &container_of(map, struct bpf_stab, map)->progs;
	case BPF_MAP_TYPE_SOCKHASH:
		return &container_of(map, struct bpf_shtab, map)->progs;
	default:
		break;
	}

	return NULL;
}

static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
				u32 which)
{
	struct sk_psock_progs *progs = sock_map_progs(map);

	if (!progs)
		return -EOPNOTSUPP;

	switch (which) {
	case BPF_SK_MSG_VERDICT:
		*pprog = &progs->msg_parser;
		break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	case BPF_SK_SKB_STREAM_PARSER:
		*pprog = &progs->stream_parser;
		break;
#endif
	case BPF_SK_SKB_STREAM_VERDICT:
		if (progs->skb_verdict)
			return -EBUSY;
		*pprog = &progs->stream_verdict;
		break;
	case BPF_SK_SKB_VERDICT:
		if (progs->stream_verdict)
			return -EBUSY;
		*pprog = &progs->skb_verdict;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which)
{
	struct bpf_prog **pprog;
	int ret;

	ret = sock_map_prog_lookup(map, &pprog, which);
	if (ret)
		return ret;

	if (old)
		return psock_replace_prog(pprog, prog, old);

	psock_set_prog(pprog, prog);
	return 0;
}

int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
	struct bpf_prog **pprog;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	u32 id = 0;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	rcu_read_lock();

	ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
	if (ret)
		goto end;

	prog = *pprog;
	prog_cnt = !prog ? 0 : 1;

	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
		goto end;

	/* we do not hold the refcnt, the bpf prog may be released
	 * asynchronously and the id would be set to 0.
	 */
	id = data_race(prog->aux->id);
	if (id == 0)
		prog_cnt = 0;

end:
	rcu_read_unlock();

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
		ret = -EFAULT;

	fdput(f);
	return ret;
}
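
/* Example (illustration only): querying the attached verdict program from
 * user space via libbpf. At most one program id comes back, matching the
 * prog_cnt <= 1 logic above:
 *
 *	__u32 id = 0, cnt = 1, attach_flags = 0;
 *
 *	if (!bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0,
 *			    &attach_flags, &id, &cnt) && cnt)
 *		... id holds the attached program's id ...
 */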

static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
	switch (link->map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return sock_map_delete_from_link(link->map, sk,
						 link->link_raw);
	case BPF_MAP_TYPE_SOCKHASH:
		return sock_hash_delete_from_link(link->map, sk,
						  link->link_raw);
	default:
		break;
	}
}

static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_link *link;

	while ((link = sk_psock_link_pop(psock))) {
		sock_map_unlink(sk, link);
		sk_psock_free_link(link);
	}
}

void sock_map_unhash(struct sock *sk)
{
	void (*saved_unhash)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
	} else {
		saved_unhash = psock->saved_unhash;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
	}
	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
		return;
	if (saved_unhash)
		saved_unhash(sk);
}
EXPORT_SYMBOL_GPL(sock_map_unhash);

void sock_map_destroy(struct sock *sk)
{
	void (*saved_destroy)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
	} else {
		saved_destroy = psock->saved_destroy;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
		sk_psock_stop(psock);
		sk_psock_put(sk, psock);
	}
	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
		return;
	if (saved_destroy)
		saved_destroy(sk);
}
EXPORT_SYMBOL_GPL(sock_map_destroy);

void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		release_sock(sk);
		saved_close = READ_ONCE(sk->sk_prot)->close;
	} else {
		saved_close = psock->saved_close;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
		sk_psock_stop(psock);
		release_sock(sk);
		cancel_delayed_work_sync(&psock->work);
		sk_psock_put(sk, psock);
	}

	/* Make sure we do not recurse. This is a bug.
	 * Leak the socket instead of crashing on a stack overflow.
	 */
	if (WARN_ON_ONCE(saved_close == sock_map_close))
		return;
	saved_close(sk, timeout);
}
EXPORT_SYMBOL_GPL(sock_map_close);

static int sock_map_iter_attach_target(struct bpf_prog *prog,
				       union bpf_iter_link_info *linfo,
				       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->key_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static struct bpf_iter_reg sock_map_iter_reg = {
	.target			= "sockmap",
	.attach_target		= sock_map_iter_attach_target,
	.detach_target		= sock_map_iter_detach_target,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__sockmap, key),
		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
		{ offsetof(struct bpf_iter__sockmap, sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static int __init bpf_sockmap_iter_init(void)
{
	sock_map_iter_reg.ctx_arg_info[1].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);
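
/* Example (illustration only): with the "sockmap" target registered above, a
 * BPF iterator program can walk map entries through the bpf_iter__sockmap
 * context. A minimal sketch, assuming BPF_SEQ_PRINTF from libbpf's
 * bpf_tracing.h:
 *
 *	SEC("iter/sockmap")
 *	int dump(struct bpf_iter__sockmap *ctx)
 *	{
 *		__u32 *key = ctx->key;
 *
 *		if (key && ctx->sk)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%u\n", *key);
 *		return 0;
 *	}
 */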