// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

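/* Human-readable names for the call states and completion modes.  Each string
 * is padded to a fixed eight characters, presumably so that the columns line
 * up in the procfs call listing.
 */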
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;

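/* Counting semaphores that cap the number of concurrent calls at 1000 each.
 * Kernel-API calls and userspace socket calls draw on separate pools,
 * presumably so that one class of user cannot starve the other.
 */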
static DEFINE_SEMAPHORE(rxrpc_call_limiter, 1000);
static DEFINE_SEMAPHORE(rxrpc_kernel_call_limiter, 1000);

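/* Poke a call so that it gets attention from the I/O thread: take a ref and
 * add it to the local endpoint's attend queue, unless it is already queued or
 * has been disconnected.
 */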
void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
{
	struct rxrpc_local *local = call->local;
	bool busy;

	if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {
		spin_lock_bh(&local->lock);
		busy = !list_empty(&call->attend_link);
		trace_rxrpc_poke_call(call, busy, what);
		if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke))
			busy = true;
		if (!busy) {
			list_add_tail(&call->attend_link, &local->call_attend_q);
		}
		spin_unlock_bh(&local->lock);
		if (!busy)
			rxrpc_wake_up_io_thread(local);
	}
}

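/* Handle expiry of the call's timer: poke the call so that the I/O thread
 * reconsiders its timeouts, unless the call has already completed.
 */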
static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (!__rxrpc_call_is_complete(call)) {
		trace_rxrpc_timer_expired(call, jiffies);
		rxrpc_poke_call(call, rxrpc_call_poke_timer);
	}
}

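/* Bring the call's timer forward to expire_at if that is sooner than its
 * currently scheduled expiry; timer_reduce() only ever moves an armed timer
 * earlier, never later.
 */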
void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

static void rxrpc_destroy_call(struct work_struct *);

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_get_sendmsg);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, refcount_read(&call->ref));
	return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->destroyer, rxrpc_destroy_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	INIT_LIST_HEAD(&call->attend_link);
	INIT_LIST_HEAD(&call->tx_sendmsg);
	INIT_LIST_HEAD(&call->tx_buffer);
	skb_queue_head_init(&call->recvmsg_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->tx_lock);
	refcount_set(&call->ref, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;
	call->ackr_window = 1;
	call->ackr_wtop = 1;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;

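	/* Set the initial congestion window in packets, following the table
	 * in RFC 5681 s3.1 (initial window as a function of sender MSS).
	 */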
	if (RXRPC_TX_SMSS > 2190)
		call->cong_cwnd = 2;
	else if (RXRPC_TX_SMSS > 1095)
		call->cong_cwnd = 3;
	else
		call->cong_cwnd = 4;
	call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;

	call->rxnet = rxnet;
	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
	atomic_inc(&rxnet->nr_calls);
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  struct rxrpc_conn_parameters *cp,
						  struct rxrpc_call_params *p,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;
	int ret;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;
	call->dest_srx = *srx;
	call->interruptibility = p->interruptibility;
	call->tx_total_len = p->tx_total_len;
	call->key = key_get(cp->key);
	call->local = rxrpc_get_local(cp->local, rxrpc_local_get_call);
	call->security_level = cp->security_level;
	if (p->kernel)
		__set_bit(RXRPC_CALL_KERNEL, &call->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CALL_UPGRADE, &call->flags);
	if (cp->exclusive)
		__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);

	if (p->timeouts.normal)
		call->next_rx_timo = min(msecs_to_jiffies(p->timeouts.normal), 1UL);
	if (p->timeouts.idle)
		call->next_req_timo = min(msecs_to_jiffies(p->timeouts.idle), 1UL);
	if (p->timeouts.hard)
		call->hard_timo = p->timeouts.hard * HZ;

	ret = rxrpc_init_client_call_security(call);
	if (ret < 0) {
		rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
		rxrpc_put_call(call, rxrpc_call_put_discard_error);
		return ERR_PTR(ret);
	}

	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN);

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 p->user_call_ID, rxrpc_call_new_client);

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

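	/* Park every per-event timeout as far in the future as jiffies
	 * arithmetic allows; each one is pulled nearer later, as events
	 * actually require it.
	 */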
	call->delay_ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->keepalive_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j + call->hard_timo;
	call->timer.expires = now;
}

/*
 * Wait for a call slot to become available.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (p->kernel)
		limiter = &rxrpc_kernel_call_limiter;
	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
		down(limiter);
		return limiter;
	}
	return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
		limiter = &rxrpc_kernel_call_limiter;
	up(limiter);
}

/*
 * Start the process of connecting a call.  We obtain a peer and a connection
 * bundle, but the actual association of a call with a connection is offloaded
 * to the I/O thread to simplify locking.
 */
static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
{
	struct rxrpc_local *local = call->local;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	call->peer = rxrpc_lookup_peer(local, &call->dest_srx, gfp);
	if (!call->peer)
		goto error;

	ret = rxrpc_look_up_bundle(call, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call);
	rxrpc_get_call(call, rxrpc_call_get_io_thread);
	spin_lock(&local->client_call_lock);
	list_add_tail(&call->wait_link, &local->new_client_calls);
	spin_unlock(&local->client_call_lock);
	rxrpc_wake_up_io_thread(local);
	return 0;

error:
	__set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	return ret;
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct semaphore *limiter;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	limiter = rxrpc_get_call_slot(p, gfp);
	if (!limiter) {
		release_sock(&rx->sk);
		return ERR_PTR(-ERESTARTSYS);
	}

	call = rxrpc_alloc_client_call(rx, srx, cp, p, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		up(limiter);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_get_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, gfp);
	if (ret < 0)
		goto error_attached_to_socket;

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the tree after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST);
	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0,
			 rxrpc_call_see_userid_exists);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_userid_exists);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);

	/* We got an error, but the call is attached to the socket and is in
	 * need of release.  However, we might now race with recvmsg() when
	 * completion of the call notifies the socket.  Return 0 from
	 * sys_sendmsg() and leave the error to recvmsg() to deal with.
	 */
error_attached_to_socket:
	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret,
			 rxrpc_call_see_connect_failed);
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
	_leave(" = c=%08x [err]", call->debug_id);
	return call;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->dest_srx.srx_service = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->cong_tstamp = skb->tstamp;

	__set_bit(RXRPC_CALL_EXPOSED, &call->flags);
	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);

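	/* Pick the call's initial state from the connection's current
	 * security state: keep securing while the connection is still being
	 * secured, go straight to receiving the request if it's ready, or
	 * inherit the abort if the connection has already failed.
	 */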
	spin_lock(&conn->state_lock);

	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
	case RXRPC_CONN_SERVICE_CHALLENGING:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
		break;
	case RXRPC_CONN_SERVICE:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
		break;

	case RXRPC_CONN_ABORTED:
		rxrpc_set_call_completion(call, conn->completion,
					  conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}

	rxrpc_get_call(call, rxrpc_call_get_io_thread);

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	conn->channels[chan].call = call;
	spin_unlock(&conn->state_lock);

	spin_lock(&conn->peer->lock);
	hlist_add_head(&call->error_link, &conn->peer->error_targets);
	spin_unlock(&conn->peer->lock);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	if (call) {
		int r = refcount_read(&call->ref);

		trace_rxrpc_call(call->debug_id, r, 0, why);
	}
}

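/*
 * Attempt to take a ref on a call, failing and returning NULL if the refcount
 * has already reached zero (i.e. the call is being destroyed).
 */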
struct rxrpc_call *rxrpc_try_get_call(struct rxrpc_call *call,
				      enum rxrpc_call_trace why)
{
	int r;

	if (!call || !__refcount_inc_not_zero(&call->ref, &r))
		return NULL;
	trace_rxrpc_call(call->debug_id, r + 1, 0, why);
	return call;
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	int r;

	__refcount_inc(&call->ref, &r);
	trace_rxrpc_call(call->debug_id, r + 1, 0, why);
}

/*
 * Clean up the Rx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
	skb_queue_purge(&call->recvmsg_queue);
	skb_queue_purge(&call->rx_oos_queue);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	bool put = false, putu = false;

	_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 call->flags, rxrpc_call_see_release);

	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	rxrpc_put_call_slot(call);

	/* Make sure we don't get any more notifications */
	spin_lock(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	spin_unlock(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put_unnotify);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		putu = true;
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (putu)
		rxrpc_put_call(call, rxrpc_call_put_userid);

	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

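	/* First abort and drop any calls that were queued for acceptance but
	 * never accepted by userspace.
	 */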
	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
				    rxrpc_abort_call_sock_release_tba);
		rxrpc_put_call(call, rxrpc_call_put_release_sock_tba);
	}

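	/* Then abort and detach every call still attached to the socket. */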
	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_get_release_sock);
		rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
				    rxrpc_abort_call_sock_release);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put_release_sock);
	}

	_leave("");
}

/*
 * Drop a ref on a call, destroying the call once the final ref is released.
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	struct rxrpc_net *rxnet = call->rxnet;
	unsigned int debug_id = call->debug_id;
	bool dead;
	int r;

	ASSERT(call != NULL);

	dead = __refcount_dec_and_test(&call->ref, &r);
	trace_rxrpc_call(debug_id, r - 1, 0, why);
	if (dead) {
		ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			spin_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			spin_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Free up the call under RCU.
 */
static void rxrpc_rcu_free_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
	struct rxrpc_net *rxnet = READ_ONCE(call->rxnet);

	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer);
	struct rxrpc_txbuf *txb;

	del_timer_sync(&call->timer);

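	/* Discard any skbs still queued for reception and any tx buffers
	 * still awaiting or part-way through transmission.
	 */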
	rxrpc_cleanup_ring(call);
	while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}
	while ((txb = list_first_entry_or_null(&call->tx_buffer,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}

	rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
	rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
	rxrpc_deactivate_bundle(call->bundle);
	rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call);
	rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
	rxrpc_put_local(call->local, rxrpc_local_put_call);
	call_rcu(&call->rcu, rxrpc_rcu_free_call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

	del_timer(&call->timer);

	if (rcu_read_lock_held())
		/* Can't use the rxrpc workqueue as we need to cancel/flush
		 * something that may be running/waiting there.
		 */
		schedule_work(&call->destroyer);
	else
		rxrpc_destroy_call(&call->destroyer);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		spin_lock(&rxnet->call_lock);

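		/* Any call still listed here has evidently been leaked;
		 * complain about it and unlink it so that the namespace can
		 * be torn down.
		 */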
		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call, rxrpc_call_see_zap);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, refcount_read(&call->ref),
			       rxrpc_call_states[__rxrpc_call_state(call)],
			       call->flags, call->events);

			spin_unlock(&rxnet->call_lock);
			cond_resched();
			spin_lock(&rxnet->call_lock);
		}

		spin_unlock(&rxnet->call_lock);
	}

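	/* The call count is presumably biased by one from when the namespace
	 * was initialised; drop that bias, then wait for every outstanding
	 * call to be freed by RCU.
	 */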
	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}