1 | /* |
2 | * Pluggable TCP upper layer protocol support. |
3 | * |
4 | * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved. |
5 | * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved. |
6 | * |
7 | */ |
8 | |
9 | #include <linux/module.h> |
10 | #include <linux/mm.h> |
11 | #include <linux/types.h> |
12 | #include <linux/list.h> |
13 | #include <linux/gfp.h> |
14 | #include <net/tcp.h> |
15 | |
16 | static DEFINE_SPINLOCK(tcp_ulp_list_lock); |
17 | static LIST_HEAD(tcp_ulp_list); |
18 | |
19 | /* Simple linear search, don't expect many entries! */ |
20 | static struct tcp_ulp_ops *tcp_ulp_find(const char *name) |
21 | { |
22 | struct tcp_ulp_ops *e; |
23 | |
24 | list_for_each_entry_rcu(e, &tcp_ulp_list, list) { |
25 | if (strcmp(e->name, name) == 0) |
26 | return e; |
27 | } |
28 | |
29 | return NULL; |
30 | } |
31 | |
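/* Look up a ULP by name, optionally autoloading a module that provides it.
 * For CAP_NET_ADMIN callers the "tcp-ulp-<name>" module alias is tried
 * before giving up.  On success a reference on ulp->owner is held and the
 * caller must drop it with module_put() when done with the ops.
 */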
32 | static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name) |
33 | { |
34 | const struct tcp_ulp_ops *ulp = NULL; |
35 | |
36 | rcu_read_lock(); |
37 | ulp = tcp_ulp_find(name); |
38 | |
39 | #ifdef CONFIG_MODULES |
40 | if (!ulp && capable(CAP_NET_ADMIN)) { |
41 | rcu_read_unlock(); |
		request_module("tcp-ulp-%s", name);
43 | rcu_read_lock(); |
44 | ulp = tcp_ulp_find(name); |
45 | } |
46 | #endif |
47 | if (!ulp || !try_module_get(ulp->owner)) |
48 | ulp = NULL; |
49 | |
50 | rcu_read_unlock(); |
51 | return ulp; |
52 | } |
53 | |
54 | /* Attach new upper layer protocol to the list |
55 | * of available protocols. |
56 | */ |
57 | int tcp_register_ulp(struct tcp_ulp_ops *ulp) |
58 | { |
59 | int ret = 0; |
60 | |
61 | spin_lock(&tcp_ulp_list_lock); |
62 | if (tcp_ulp_find(ulp->name)) |
63 | ret = -EEXIST; |
64 | else |
65 | list_add_tail_rcu(&ulp->list, &tcp_ulp_list); |
66 | spin_unlock(&tcp_ulp_list_lock); |
67 | |
68 | return ret; |
69 | } |
70 | EXPORT_SYMBOL_GPL(tcp_register_ulp); |
71 | |
72 | void tcp_unregister_ulp(struct tcp_ulp_ops *ulp) |
73 | { |
74 | spin_lock(&tcp_ulp_list_lock); |
75 | list_del_rcu(&ulp->list); |
76 | spin_unlock(&tcp_ulp_list_lock); |
77 | |
78 | synchronize_rcu(); |
79 | } |
80 | EXPORT_SYMBOL_GPL(tcp_unregister_ulp); |
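
/* A ULP module registers its tcp_ulp_ops at module init time and should
 * provide a "tcp-ulp-<name>" module alias so that __tcp_ulp_find_autoload()
 * above can load it on demand.  A minimal sketch, with purely illustrative
 * names (not an in-tree ULP):
 *
 *	static struct tcp_ulp_ops example_ulp_ops __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.init		= example_init,
 *		.release	= example_release,
 *	};
 *
 *	static int __init example_register(void)
 *	{
 *		return tcp_register_ulp(&example_ulp_ops);
 *	}
 *
 *	static void __exit example_unregister(void)
 *	{
 *		tcp_unregister_ulp(&example_ulp_ops);
 *	}
 *
 *	module_init(example_register);
 *	module_exit(example_unregister);
 *	MODULE_ALIAS("tcp-ulp-example");
 *	MODULE_LICENSE("GPL");
 */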
81 | |
/* Build string with list of available upper layer protocol values */
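/* The result typically backs the read-only net.ipv4.tcp_available_ulp
 * sysctl, e.g. reading "tls" once the tls module has registered.
 */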
83 | void tcp_get_available_ulp(char *buf, size_t maxlen) |
84 | { |
85 | struct tcp_ulp_ops *ulp_ops; |
86 | size_t offs = 0; |
87 | |
88 | *buf = '\0'; |
89 | rcu_read_lock(); |
	list_for_each_entry_rcu(ulp_ops, &tcp_ulp_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ulp_ops->name);

		/* Don't let maxlen - offs wrap around once buf is full */
		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
95 | rcu_read_unlock(); |
96 | } |
97 | |
98 | void tcp_cleanup_ulp(struct sock *sk) |
99 | { |
100 | struct inet_connection_sock *icsk = inet_csk(sk); |
101 | |
102 | /* No sock_owned_by_me() check here as at the time the |
103 | * stack calls this function, the socket is dead and |
104 | * about to be destroyed. |
105 | */ |
106 | if (!icsk->icsk_ulp_ops) |
107 | return; |
108 | |
109 | if (icsk->icsk_ulp_ops->release) |
110 | icsk->icsk_ulp_ops->release(sk); |
111 | module_put(icsk->icsk_ulp_ops->owner); |
112 | |
113 | icsk->icsk_ulp_ops = NULL; |
114 | } |
115 | |
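/* Hand @sk over to @ulp_ops.  On failure the module reference taken by
 * __tcp_ulp_find_autoload() is dropped here; on success it is kept until
 * tcp_cleanup_ulp() releases it.
 */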
116 | static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops) |
117 | { |
118 | struct inet_connection_sock *icsk = inet_csk(sk); |
119 | int err; |
120 | |
121 | err = -EEXIST; |
122 | if (icsk->icsk_ulp_ops) |
123 | goto out_err; |
124 | |
125 | err = ulp_ops->init(sk); |
126 | if (err) |
127 | goto out_err; |
128 | |
129 | icsk->icsk_ulp_ops = ulp_ops; |
130 | return 0; |
131 | out_err: |
132 | module_put(ulp_ops->owner); |
133 | return err; |
134 | } |
135 | |
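/* Select the ULP named @name for @sk; the socket lock must be held.
 * This is reached via the TCP_ULP socket option, e.g. to enable kernel
 * TLS from userspace (error handling omitted):
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 */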
136 | int tcp_set_ulp(struct sock *sk, const char *name) |
137 | { |
138 | const struct tcp_ulp_ops *ulp_ops; |
139 | |
140 | sock_owned_by_me(sk); |
141 | |
142 | ulp_ops = __tcp_ulp_find_autoload(name); |
143 | if (!ulp_ops) |
144 | return -ENOENT; |
145 | |
146 | return __tcp_set_ulp(sk, ulp_ops); |
147 | } |
148 | |