1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * File: socket.c |
4 | * |
5 | * Phonet sockets |
6 | * |
7 | * Copyright (C) 2008 Nokia Corporation. |
8 | * |
9 | * Authors: Sakari Ailus <sakari.ailus@nokia.com> |
10 | * Rémi Denis-Courmont |
11 | */ |
12 | |
13 | #include <linux/gfp.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/net.h> |
16 | #include <linux/poll.h> |
17 | #include <linux/sched/signal.h> |
18 | |
19 | #include <net/sock.h> |
20 | #include <net/tcp_states.h> |
21 | |
22 | #include <linux/phonet.h> |
23 | #include <linux/export.h> |
24 | #include <net/phonet/phonet.h> |
25 | #include <net/phonet/pep.h> |
26 | #include <net/phonet/pn_dev.h> |
27 | |
28 | static int pn_socket_release(struct socket *sock) |
29 | { |
30 | struct sock *sk = sock->sk; |
31 | |
32 | if (sk) { |
33 | sock->sk = NULL; |
34 | sk->sk_prot->close(sk, 0); |
35 | } |
36 | return 0; |
37 | } |
38 | |
/* Bound sockets are hashed by the low bits of their local port */
#define PN_HASHSIZE 16
#define PN_HASHMASK (PN_HASHSIZE-1)


/* Global table of bound Phonet sockets.
 * Writers take @lock; readers traverse the chains under RCU. */
static struct {
	struct hlist_head hlist[PN_HASHSIZE];
	struct mutex lock;
} pnsocks;
47 | |
48 | void __init pn_sock_init(void) |
49 | { |
50 | unsigned int i; |
51 | |
52 | for (i = 0; i < PN_HASHSIZE; i++) |
53 | INIT_HLIST_HEAD(pnsocks.hlist + i); |
54 | mutex_init(&pnsocks.lock); |
55 | } |
56 | |
57 | static struct hlist_head *pn_hash_list(u16 obj) |
58 | { |
59 | return pnsocks.hlist + (obj & PN_HASHMASK); |
60 | } |
61 | |
62 | /* |
63 | * Find address based on socket address, match only certain fields. |
64 | * Also grab sock if it was found. Remember to sock_put it later. |
65 | */ |
66 | struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) |
67 | { |
68 | struct sock *sknode; |
69 | struct sock *rval = NULL; |
70 | u16 obj = pn_sockaddr_get_object(spn); |
71 | u8 res = spn->spn_resource; |
72 | struct hlist_head *hlist = pn_hash_list(obj); |
73 | |
74 | rcu_read_lock(); |
75 | sk_for_each_rcu(sknode, hlist) { |
76 | struct pn_sock *pn = pn_sk(sk: sknode); |
77 | BUG_ON(!pn->sobject); /* unbound socket */ |
78 | |
79 | if (!net_eq(net1: sock_net(sk: sknode), net2: net)) |
80 | continue; |
81 | if (pn_port(handle: obj)) { |
82 | /* Look up socket by port */ |
83 | if (pn_port(handle: pn->sobject) != pn_port(handle: obj)) |
84 | continue; |
85 | } else { |
86 | /* If port is zero, look up by resource */ |
87 | if (pn->resource != res) |
88 | continue; |
89 | } |
90 | if (pn_addr(handle: pn->sobject) && |
91 | pn_addr(handle: pn->sobject) != pn_addr(handle: obj)) |
92 | continue; |
93 | |
94 | rval = sknode; |
95 | sock_hold(sk: sknode); |
96 | break; |
97 | } |
98 | rcu_read_unlock(); |
99 | |
100 | return rval; |
101 | } |
102 | |
103 | /* Deliver a broadcast packet (only in bottom-half) */ |
104 | void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) |
105 | { |
106 | struct hlist_head *hlist = pnsocks.hlist; |
107 | unsigned int h; |
108 | |
109 | rcu_read_lock(); |
110 | for (h = 0; h < PN_HASHSIZE; h++) { |
111 | struct sock *sknode; |
112 | |
113 | sk_for_each(sknode, hlist) { |
114 | struct sk_buff *clone; |
115 | |
116 | if (!net_eq(net1: sock_net(sk: sknode), net2: net)) |
117 | continue; |
118 | if (!sock_flag(sk: sknode, flag: SOCK_BROADCAST)) |
119 | continue; |
120 | |
121 | clone = skb_clone(skb, GFP_ATOMIC); |
122 | if (clone) { |
123 | sock_hold(sk: sknode); |
124 | sk_receive_skb(sk: sknode, skb: clone, nested: 0); |
125 | } |
126 | } |
127 | hlist++; |
128 | } |
129 | rcu_read_unlock(); |
130 | } |
131 | |
132 | int pn_sock_hash(struct sock *sk) |
133 | { |
134 | struct hlist_head *hlist = pn_hash_list(obj: pn_sk(sk)->sobject); |
135 | |
136 | mutex_lock(&pnsocks.lock); |
137 | sk_add_node_rcu(sk, list: hlist); |
138 | mutex_unlock(lock: &pnsocks.lock); |
139 | |
140 | return 0; |
141 | } |
142 | EXPORT_SYMBOL(pn_sock_hash); |
143 | |
144 | void pn_sock_unhash(struct sock *sk) |
145 | { |
146 | mutex_lock(&pnsocks.lock); |
147 | sk_del_node_init_rcu(sk); |
148 | mutex_unlock(lock: &pnsocks.lock); |
149 | pn_sock_unbind_all_res(sk); |
150 | synchronize_rcu(); |
151 | } |
152 | EXPORT_SYMBOL(pn_sock_unhash); |
153 | |
154 | static DEFINE_MUTEX(port_mutex); |
155 | |
156 | static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len) |
157 | { |
158 | struct sock *sk = sock->sk; |
159 | struct pn_sock *pn = pn_sk(sk); |
160 | struct sockaddr_pn *spn = (struct sockaddr_pn *)addr; |
161 | int err; |
162 | u16 handle; |
163 | u8 saddr; |
164 | |
165 | if (sk->sk_prot->bind) |
166 | return sk->sk_prot->bind(sk, addr, len); |
167 | |
168 | if (len < sizeof(struct sockaddr_pn)) |
169 | return -EINVAL; |
170 | if (spn->spn_family != AF_PHONET) |
171 | return -EAFNOSUPPORT; |
172 | |
173 | handle = pn_sockaddr_get_object(spn: (struct sockaddr_pn *)addr); |
174 | saddr = pn_addr(handle); |
175 | if (saddr && phonet_address_lookup(net: sock_net(sk), addr: saddr)) |
176 | return -EADDRNOTAVAIL; |
177 | |
178 | lock_sock(sk); |
179 | if (sk->sk_state != TCP_CLOSE || pn_port(handle: pn->sobject)) { |
180 | err = -EINVAL; /* attempt to rebind */ |
181 | goto out; |
182 | } |
183 | WARN_ON(sk_hashed(sk)); |
184 | mutex_lock(&port_mutex); |
185 | err = sk->sk_prot->get_port(sk, pn_port(handle)); |
186 | if (err) |
187 | goto out_port; |
188 | |
189 | /* get_port() sets the port, bind() sets the address if applicable */ |
190 | pn->sobject = pn_object(addr: saddr, port: pn_port(handle: pn->sobject)); |
191 | pn->resource = spn->spn_resource; |
192 | |
193 | /* Enable RX on the socket */ |
194 | err = sk->sk_prot->hash(sk); |
195 | out_port: |
196 | mutex_unlock(lock: &port_mutex); |
197 | out: |
198 | release_sock(sk); |
199 | return err; |
200 | } |
201 | |
202 | static int pn_socket_autobind(struct socket *sock) |
203 | { |
204 | struct sockaddr_pn sa; |
205 | int err; |
206 | |
207 | memset(&sa, 0, sizeof(sa)); |
208 | sa.spn_family = AF_PHONET; |
209 | err = pn_socket_bind(sock, addr: (struct sockaddr *)&sa, |
210 | len: sizeof(struct sockaddr_pn)); |
211 | if (err != -EINVAL) |
212 | return err; |
213 | BUG_ON(!pn_port(pn_sk(sock->sk)->sobject)); |
214 | return 0; /* socket was already bound */ |
215 | } |
216 | |
217 | static int pn_socket_connect(struct socket *sock, struct sockaddr *addr, |
218 | int len, int flags) |
219 | { |
220 | struct sock *sk = sock->sk; |
221 | struct pn_sock *pn = pn_sk(sk); |
222 | struct sockaddr_pn *spn = (struct sockaddr_pn *)addr; |
223 | struct task_struct *tsk = current; |
224 | long timeo = sock_rcvtimeo(sk, noblock: flags & O_NONBLOCK); |
225 | int err; |
226 | |
227 | if (pn_socket_autobind(sock)) |
228 | return -ENOBUFS; |
229 | if (len < sizeof(struct sockaddr_pn)) |
230 | return -EINVAL; |
231 | if (spn->spn_family != AF_PHONET) |
232 | return -EAFNOSUPPORT; |
233 | |
234 | lock_sock(sk); |
235 | |
236 | switch (sock->state) { |
237 | case SS_UNCONNECTED: |
238 | if (sk->sk_state != TCP_CLOSE) { |
239 | err = -EISCONN; |
240 | goto out; |
241 | } |
242 | break; |
243 | case SS_CONNECTING: |
244 | err = -EALREADY; |
245 | goto out; |
246 | default: |
247 | err = -EISCONN; |
248 | goto out; |
249 | } |
250 | |
251 | pn->dobject = pn_sockaddr_get_object(spn); |
252 | pn->resource = pn_sockaddr_get_resource(spn); |
253 | sock->state = SS_CONNECTING; |
254 | |
255 | err = sk->sk_prot->connect(sk, addr, len); |
256 | if (err) { |
257 | sock->state = SS_UNCONNECTED; |
258 | pn->dobject = 0; |
259 | goto out; |
260 | } |
261 | |
262 | while (sk->sk_state == TCP_SYN_SENT) { |
263 | DEFINE_WAIT(wait); |
264 | |
265 | if (!timeo) { |
266 | err = -EINPROGRESS; |
267 | goto out; |
268 | } |
269 | if (signal_pending(p: tsk)) { |
270 | err = sock_intr_errno(timeo); |
271 | goto out; |
272 | } |
273 | |
274 | prepare_to_wait_exclusive(wq_head: sk_sleep(sk), wq_entry: &wait, |
275 | TASK_INTERRUPTIBLE); |
276 | release_sock(sk); |
277 | timeo = schedule_timeout(timeout: timeo); |
278 | lock_sock(sk); |
279 | finish_wait(wq_head: sk_sleep(sk), wq_entry: &wait); |
280 | } |
281 | |
282 | if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) |
283 | err = 0; |
284 | else if (sk->sk_state == TCP_CLOSE_WAIT) |
285 | err = -ECONNRESET; |
286 | else |
287 | err = -ECONNREFUSED; |
288 | sock->state = err ? SS_UNCONNECTED : SS_CONNECTED; |
289 | out: |
290 | release_sock(sk); |
291 | return err; |
292 | } |
293 | |
294 | static int pn_socket_accept(struct socket *sock, struct socket *newsock, |
295 | int flags, bool kern) |
296 | { |
297 | struct sock *sk = sock->sk; |
298 | struct sock *newsk; |
299 | int err; |
300 | |
301 | if (unlikely(sk->sk_state != TCP_LISTEN)) |
302 | return -EINVAL; |
303 | |
304 | newsk = sk->sk_prot->accept(sk, flags, &err, kern); |
305 | if (!newsk) |
306 | return err; |
307 | |
308 | lock_sock(sk: newsk); |
309 | sock_graft(sk: newsk, parent: newsock); |
310 | newsock->state = SS_CONNECTED; |
311 | release_sock(sk: newsk); |
312 | return 0; |
313 | } |
314 | |
315 | static int pn_socket_getname(struct socket *sock, struct sockaddr *addr, |
316 | int peer) |
317 | { |
318 | struct sock *sk = sock->sk; |
319 | struct pn_sock *pn = pn_sk(sk); |
320 | |
321 | memset(addr, 0, sizeof(struct sockaddr_pn)); |
322 | addr->sa_family = AF_PHONET; |
323 | if (!peer) /* Race with bind() here is userland's problem. */ |
324 | pn_sockaddr_set_object(spn: (struct sockaddr_pn *)addr, |
325 | handle: pn->sobject); |
326 | |
327 | return sizeof(struct sockaddr_pn); |
328 | } |
329 | |
330 | static __poll_t pn_socket_poll(struct file *file, struct socket *sock, |
331 | poll_table *wait) |
332 | { |
333 | struct sock *sk = sock->sk; |
334 | struct pep_sock *pn = pep_sk(sk); |
335 | __poll_t mask = 0; |
336 | |
337 | poll_wait(filp: file, wait_address: sk_sleep(sk), p: wait); |
338 | |
339 | if (sk->sk_state == TCP_CLOSE) |
340 | return EPOLLERR; |
341 | if (!skb_queue_empty_lockless(list: &sk->sk_receive_queue)) |
342 | mask |= EPOLLIN | EPOLLRDNORM; |
343 | if (!skb_queue_empty_lockless(list: &pn->ctrlreq_queue)) |
344 | mask |= EPOLLPRI; |
345 | if (!mask && sk->sk_state == TCP_CLOSE_WAIT) |
346 | return EPOLLHUP; |
347 | |
348 | if (sk->sk_state == TCP_ESTABLISHED && |
349 | refcount_read(r: &sk->sk_wmem_alloc) < sk->sk_sndbuf && |
350 | atomic_read(v: &pn->tx_credits)) |
351 | mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; |
352 | |
353 | return mask; |
354 | } |
355 | |
356 | static int pn_socket_ioctl(struct socket *sock, unsigned int cmd, |
357 | unsigned long arg) |
358 | { |
359 | struct sock *sk = sock->sk; |
360 | struct pn_sock *pn = pn_sk(sk); |
361 | |
362 | if (cmd == SIOCPNGETOBJECT) { |
363 | struct net_device *dev; |
364 | u16 handle; |
365 | u8 saddr; |
366 | |
367 | if (get_user(handle, (__u16 __user *)arg)) |
368 | return -EFAULT; |
369 | |
370 | lock_sock(sk); |
371 | if (sk->sk_bound_dev_if) |
372 | dev = dev_get_by_index(net: sock_net(sk), |
373 | ifindex: sk->sk_bound_dev_if); |
374 | else |
375 | dev = phonet_device_get(net: sock_net(sk)); |
376 | if (dev && (dev->flags & IFF_UP)) |
377 | saddr = phonet_address_get(dev, addr: pn_addr(handle)); |
378 | else |
379 | saddr = PN_NO_ADDR; |
380 | release_sock(sk); |
381 | |
382 | dev_put(dev); |
383 | if (saddr == PN_NO_ADDR) |
384 | return -EHOSTUNREACH; |
385 | |
386 | handle = pn_object(addr: saddr, port: pn_port(handle: pn->sobject)); |
387 | return put_user(handle, (__u16 __user *)arg); |
388 | } |
389 | |
390 | return sk_ioctl(sk, cmd, arg: (void __user *)arg); |
391 | } |
392 | |
393 | static int pn_socket_listen(struct socket *sock, int backlog) |
394 | { |
395 | struct sock *sk = sock->sk; |
396 | int err = 0; |
397 | |
398 | if (pn_socket_autobind(sock)) |
399 | return -ENOBUFS; |
400 | |
401 | lock_sock(sk); |
402 | if (sock->state != SS_UNCONNECTED) { |
403 | err = -EINVAL; |
404 | goto out; |
405 | } |
406 | |
407 | if (sk->sk_state != TCP_LISTEN) { |
408 | sk->sk_state = TCP_LISTEN; |
409 | sk->sk_ack_backlog = 0; |
410 | } |
411 | sk->sk_max_ack_backlog = backlog; |
412 | out: |
413 | release_sock(sk); |
414 | return err; |
415 | } |
416 | |
417 | static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m, |
418 | size_t total_len) |
419 | { |
420 | struct sock *sk = sock->sk; |
421 | |
422 | if (pn_socket_autobind(sock)) |
423 | return -EAGAIN; |
424 | |
425 | return sk->sk_prot->sendmsg(sk, m, total_len); |
426 | } |
427 | |
/* proto_ops for connectionless (datagram) Phonet sockets:
 * no connect/accept/listen, generic datagram poll. */
const struct proto_ops phonet_dgram_ops = {
	.family = AF_PHONET,
	.owner = THIS_MODULE,
	.release = pn_socket_release,
	.bind = pn_socket_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = pn_socket_getname,
	.poll = datagram_poll,
	.ioctl = pn_socket_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = pn_socket_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
};
445 | |
/* proto_ops for connection-oriented (pipe/PEP) Phonet sockets */
const struct proto_ops phonet_stream_ops = {
	.family = AF_PHONET,
	.owner = THIS_MODULE,
	.release = pn_socket_release,
	.bind = pn_socket_bind,
	.connect = pn_socket_connect,
	.socketpair = sock_no_socketpair,
	.accept = pn_socket_accept,
	.getname = pn_socket_getname,
	.poll = pn_socket_poll,
	.ioctl = pn_socket_ioctl,
	.listen = pn_socket_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_common_setsockopt,
	.getsockopt = sock_common_getsockopt,
	.sendmsg = pn_socket_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
};
EXPORT_SYMBOL(phonet_stream_ops);
466 | |
467 | /* allocate port for a socket */ |
468 | int pn_sock_get_port(struct sock *sk, unsigned short sport) |
469 | { |
470 | static int port_cur; |
471 | struct net *net = sock_net(sk); |
472 | struct pn_sock *pn = pn_sk(sk); |
473 | struct sockaddr_pn try_sa; |
474 | struct sock *tmpsk; |
475 | |
476 | memset(&try_sa, 0, sizeof(struct sockaddr_pn)); |
477 | try_sa.spn_family = AF_PHONET; |
478 | WARN_ON(!mutex_is_locked(&port_mutex)); |
479 | if (!sport) { |
480 | /* search free port */ |
481 | int port, pmin, pmax; |
482 | |
483 | phonet_get_local_port_range(min: &pmin, max: &pmax); |
484 | for (port = pmin; port <= pmax; port++) { |
485 | port_cur++; |
486 | if (port_cur < pmin || port_cur > pmax) |
487 | port_cur = pmin; |
488 | |
489 | pn_sockaddr_set_port(spn: &try_sa, port: port_cur); |
490 | tmpsk = pn_find_sock_by_sa(net, spn: &try_sa); |
491 | if (tmpsk == NULL) { |
492 | sport = port_cur; |
493 | goto found; |
494 | } else |
495 | sock_put(sk: tmpsk); |
496 | } |
497 | } else { |
498 | /* try to find specific port */ |
499 | pn_sockaddr_set_port(spn: &try_sa, port: sport); |
500 | tmpsk = pn_find_sock_by_sa(net, spn: &try_sa); |
501 | if (tmpsk == NULL) |
502 | /* No sock there! We can use that port... */ |
503 | goto found; |
504 | else |
505 | sock_put(sk: tmpsk); |
506 | } |
507 | /* the port must be in use already */ |
508 | return -EADDRINUSE; |
509 | |
510 | found: |
511 | pn->sobject = pn_object(addr: pn_addr(handle: pn->sobject), port: sport); |
512 | return 0; |
513 | } |
514 | EXPORT_SYMBOL(pn_sock_get_port); |
515 | |
516 | #ifdef CONFIG_PROC_FS |
517 | static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) |
518 | { |
519 | struct net *net = seq_file_net(seq); |
520 | struct hlist_head *hlist = pnsocks.hlist; |
521 | struct sock *sknode; |
522 | unsigned int h; |
523 | |
524 | for (h = 0; h < PN_HASHSIZE; h++) { |
525 | sk_for_each_rcu(sknode, hlist) { |
526 | if (!net_eq(net1: net, net2: sock_net(sk: sknode))) |
527 | continue; |
528 | if (!pos) |
529 | return sknode; |
530 | pos--; |
531 | } |
532 | hlist++; |
533 | } |
534 | return NULL; |
535 | } |
536 | |
/* Advance to the next socket of the same netns within the current chain. */
static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
{
	struct net *net = seq_file_net(seq);

	do
		sk = sk_next(sk);
	while (sk && !net_eq(net, sock_net(sk)));

	return sk;
}
547 | |
548 | static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos) |
549 | __acquires(rcu) |
550 | { |
551 | rcu_read_lock(); |
552 | return *pos ? pn_sock_get_idx(seq, pos: *pos - 1) : SEQ_START_TOKEN; |
553 | } |
554 | |
555 | static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
556 | { |
557 | struct sock *sk; |
558 | |
559 | if (v == SEQ_START_TOKEN) |
560 | sk = pn_sock_get_idx(seq, pos: 0); |
561 | else |
562 | sk = pn_sock_get_next(seq, sk: v); |
563 | (*pos)++; |
564 | return sk; |
565 | } |
566 | |
/* seq_file stop: drop the RCU read lock taken in pn_sock_seq_start(). */
static void pn_sock_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}
572 | |
573 | static int pn_sock_seq_show(struct seq_file *seq, void *v) |
574 | { |
575 | seq_setwidth(m: seq, size: 127); |
576 | if (v == SEQ_START_TOKEN) |
577 | seq_puts(m: seq, s: "pt loc rem rs st tx_queue rx_queue " |
578 | " uid inode ref pointer drops" ); |
579 | else { |
580 | struct sock *sk = v; |
581 | struct pn_sock *pn = pn_sk(sk); |
582 | |
583 | seq_printf(m: seq, fmt: "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu " |
584 | "%d %pK %u" , |
585 | sk->sk_protocol, pn->sobject, pn->dobject, |
586 | pn->resource, sk->sk_state, |
587 | sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), |
588 | from_kuid_munged(to: seq_user_ns(seq), uid: sock_i_uid(sk)), |
589 | sock_i_ino(sk), |
590 | refcount_read(r: &sk->sk_refcnt), sk, |
591 | atomic_read(v: &sk->sk_drops)); |
592 | } |
593 | seq_pad(m: seq, c: '\n'); |
594 | return 0; |
595 | } |
596 | |
/* seq_file iterator for /proc/net/phonet (per-socket listing) */
const struct seq_operations pn_sock_seq_ops = {
	.start = pn_sock_seq_start,
	.next = pn_sock_seq_next,
	.stop = pn_sock_seq_stop,
	.show = pn_sock_seq_show,
};
603 | #endif |
604 | |
/* One socket slot per Phonet resource id (0..255), init_net only.
 * Written under resource_mutex, read under RCU. */
static struct {
	struct sock *sk[256];
} pnres;
608 | |
609 | /* |
610 | * Find and hold socket based on resource. |
611 | */ |
612 | struct sock *pn_find_sock_by_res(struct net *net, u8 res) |
613 | { |
614 | struct sock *sk; |
615 | |
616 | if (!net_eq(net1: net, net2: &init_net)) |
617 | return NULL; |
618 | |
619 | rcu_read_lock(); |
620 | sk = rcu_dereference(pnres.sk[res]); |
621 | if (sk) |
622 | sock_hold(sk); |
623 | rcu_read_unlock(); |
624 | return sk; |
625 | } |
626 | |
627 | static DEFINE_MUTEX(resource_mutex); |
628 | |
629 | int pn_sock_bind_res(struct sock *sk, u8 res) |
630 | { |
631 | int ret = -EADDRINUSE; |
632 | |
633 | if (!net_eq(net1: sock_net(sk), net2: &init_net)) |
634 | return -ENOIOCTLCMD; |
635 | if (!capable(CAP_SYS_ADMIN)) |
636 | return -EPERM; |
637 | if (pn_socket_autobind(sock: sk->sk_socket)) |
638 | return -EAGAIN; |
639 | |
640 | mutex_lock(&resource_mutex); |
641 | if (pnres.sk[res] == NULL) { |
642 | sock_hold(sk); |
643 | rcu_assign_pointer(pnres.sk[res], sk); |
644 | ret = 0; |
645 | } |
646 | mutex_unlock(lock: &resource_mutex); |
647 | return ret; |
648 | } |
649 | |
650 | int pn_sock_unbind_res(struct sock *sk, u8 res) |
651 | { |
652 | int ret = -ENOENT; |
653 | |
654 | if (!capable(CAP_SYS_ADMIN)) |
655 | return -EPERM; |
656 | |
657 | mutex_lock(&resource_mutex); |
658 | if (pnres.sk[res] == sk) { |
659 | RCU_INIT_POINTER(pnres.sk[res], NULL); |
660 | ret = 0; |
661 | } |
662 | mutex_unlock(lock: &resource_mutex); |
663 | |
664 | if (ret == 0) { |
665 | synchronize_rcu(); |
666 | sock_put(sk); |
667 | } |
668 | return ret; |
669 | } |
670 | |
671 | void pn_sock_unbind_all_res(struct sock *sk) |
672 | { |
673 | unsigned int res, match = 0; |
674 | |
675 | mutex_lock(&resource_mutex); |
676 | for (res = 0; res < 256; res++) { |
677 | if (pnres.sk[res] == sk) { |
678 | RCU_INIT_POINTER(pnres.sk[res], NULL); |
679 | match++; |
680 | } |
681 | } |
682 | mutex_unlock(lock: &resource_mutex); |
683 | |
684 | while (match > 0) { |
685 | __sock_put(sk); |
686 | match--; |
687 | } |
688 | /* Caller is responsible for RCU sync before final sock_put() */ |
689 | } |
690 | |
691 | #ifdef CONFIG_PROC_FS |
692 | static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos) |
693 | { |
694 | struct net *net = seq_file_net(seq); |
695 | unsigned int i; |
696 | |
697 | if (!net_eq(net1: net, net2: &init_net)) |
698 | return NULL; |
699 | |
700 | for (i = 0; i < 256; i++) { |
701 | if (pnres.sk[i] == NULL) |
702 | continue; |
703 | if (!pos) |
704 | return pnres.sk + i; |
705 | pos--; |
706 | } |
707 | return NULL; |
708 | } |
709 | |
710 | static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk) |
711 | { |
712 | struct net *net = seq_file_net(seq); |
713 | unsigned int i; |
714 | |
715 | BUG_ON(!net_eq(net, &init_net)); |
716 | |
717 | for (i = (sk - pnres.sk) + 1; i < 256; i++) |
718 | if (pnres.sk[i]) |
719 | return pnres.sk + i; |
720 | return NULL; |
721 | } |
722 | |
723 | static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos) |
724 | __acquires(resource_mutex) |
725 | { |
726 | mutex_lock(&resource_mutex); |
727 | return *pos ? pn_res_get_idx(seq, pos: *pos - 1) : SEQ_START_TOKEN; |
728 | } |
729 | |
730 | static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
731 | { |
732 | struct sock **sk; |
733 | |
734 | if (v == SEQ_START_TOKEN) |
735 | sk = pn_res_get_idx(seq, pos: 0); |
736 | else |
737 | sk = pn_res_get_next(seq, sk: v); |
738 | (*pos)++; |
739 | return sk; |
740 | } |
741 | |
742 | static void pn_res_seq_stop(struct seq_file *seq, void *v) |
743 | __releases(resource_mutex) |
744 | { |
745 | mutex_unlock(lock: &resource_mutex); |
746 | } |
747 | |
748 | static int pn_res_seq_show(struct seq_file *seq, void *v) |
749 | { |
750 | seq_setwidth(m: seq, size: 63); |
751 | if (v == SEQ_START_TOKEN) |
752 | seq_puts(m: seq, s: "rs uid inode" ); |
753 | else { |
754 | struct sock **psk = v; |
755 | struct sock *sk = *psk; |
756 | |
757 | seq_printf(m: seq, fmt: "%02X %5u %lu" , |
758 | (int) (psk - pnres.sk), |
759 | from_kuid_munged(to: seq_user_ns(seq), uid: sock_i_uid(sk)), |
760 | sock_i_ino(sk)); |
761 | } |
762 | seq_pad(m: seq, c: '\n'); |
763 | return 0; |
764 | } |
765 | |
/* seq_file iterator for the Phonet resource-binding proc file */
const struct seq_operations pn_res_seq_ops = {
	.start = pn_res_seq_start,
	.next = pn_res_seq_next,
	.stop = pn_res_seq_stop,
	.show = pn_res_seq_show,
};
772 | #endif |
773 | |