// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache *rpc_task_slabp __read_mostly;
static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);

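/*
 * Pick a GFP mask for allocations made on behalf of an RPC task.
 * When called from a workqueue worker (i.e. rpciod/xprtiod), avoid
 * retrying and warning so the worker is not blocked in memory reclaim;
 * callers are expected to cope with allocation failure (see rpc_malloc()).
 */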
gfp_t rpc_task_gfp_mask(void)
{
	if (current->flags & PF_WQ_WORKER)
		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	return GFP_KERNEL;
}
EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);

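/*
 * Atomically record the first non-zero RPC status for @task.
 * Returns true if this call set @rpc_status, false if another status
 * value had already been recorded.
 */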
bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
{
	if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
		return true;
	return false;
}

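/*
 * rpc_task_timeout - return the time remaining until @task's timer expires
 *
 * Returns the number of jiffies remaining, or 0 if no timeout is pending
 * or it has already expired.
 */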
unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;
	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list.
 *
 * Tasks that share an owner are grouped together so that they can later be
 * dequeued as a batch: the first task for a given owner sits on the queue
 * list itself, and subsequent tasks are chained off it via u.tk_wait.links.
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* Might be a task carrying a reverse-direction operation */
	if (!clnt) {
		static atomic_t rpc_pid;

		task->tk_pid = atomic_inc_return(&rpc_pid);
		return;
	}

	task->tk_pid = atomic_inc_return(&clnt->cl_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int rpc_wait_for_completion_task(struct rpc_task *task)
{
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			rpc_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
}
EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	__rpc_do_sleep_on_priority(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	if (time_is_after_jiffies(timeout)) {
		__rpc_do_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
						   task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
						   task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}

/*
 * Find the next task to wake on a priority queue.
 *
 * Privileged tasks are served first; otherwise up to queue->nr tasks are
 * taken from the current priority level (batching requests from a single
 * owner) before moving on to the next non-empty level.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service the privileged queue.
	 */
	q = &queue->tasks[RPC_NR_PRIORITY - 1];
	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && queue->nr) {
		queue->nr--;
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task *task = NULL;

	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up_locked - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 */
static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_locked(queue, task);
	}
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	spin_lock(&queue->lock);
	rpc_wake_up_locked(queue);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 */
static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	}
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	spin_lock(&queue->lock);
	rpc_wake_up_status_locked(queue, status);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	trace_rpc_task_end(task, task->tk_action);
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

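/*
 * Signal that @task should exit: record -ERESTARTSYS as its RPC status,
 * set RPC_TASK_SIGNALLED, and wake the task if it is currently sleeping
 * on a wait queue.
 */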
void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;

	if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
		return;
	trace_rpc_task_signalled(task, task->tk_action);
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task(queue, task);
}

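/*
 * Like rpc_signal_task(), but records @error as the RPC status and does
 * not set RPC_TASK_SIGNALLED. Has no effect if an RPC status was already
 * recorded.
 */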
void rpc_task_try_cancel(struct rpc_task *task, int error)
{
	struct rpc_wait_queue *queue;

	if (!rpc_task_set_rpc_status(task, error))
		return;
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task(queue, task);
}

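/*
 * Force @task to exit with @status: point tk_action at rpc_exit_task()
 * and wake the task if it is currently sleeping on a wait queue, so that
 * the state machine runs the exit path.
 */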
void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

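/*
 * Does this task need to allocate from the memory reserves? A transport
 * that is being used for swap (xprt->swapper != 0) needs its current
 * sending task to make progress under memory pressure, so __rpc_execute()
 * sets PF_MEMALLOC for it.
 */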
static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk)
{
	if (!xprt)
		return false;
	if (!atomic_read(&xprt->swapper))
		return false;
	return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk;
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;
	unsigned long pflags = current->flags;

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 */
		do_action = task->tk_action;
		/* Tasks with an RPC error status should exit */
		if (do_action && do_action != rpc_exit_task &&
		    (status = READ_ONCE(task->tk_rpc_status)) != 0) {
			task->tk_status = status;
			do_action = rpc_exit_task;
		}
		/* Callbacks override all actions */
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		if (RPC_IS_SWAPPER(task) ||
		    xprt_needs_memalloc(task->tk_xprt, task))
			current->flags |= PF_MEMALLOC;

		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task)) {
			cond_resched();
			continue;
		}

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer is still safe to dereference after that
		 * point.
		 */
		queue = task->tk_waitqueue;
		spin_lock(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock(&queue->lock);
			continue;
		}
		/* Wake up any task that has an exit status */
		if (READ_ONCE(task->tk_rpc_status) != 0) {
			rpc_wake_up_task_queue_locked(queue, task);
			spin_unlock(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock(&queue->lock);
		if (task_is_async)
			goto out;

		/* sync task: sleep here */
		trace_rpc_task_sync_sleep(task, task->tk_action);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE|TASK_FREEZABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			rpc_signal_task(task);
		}
		trace_rpc_task_sync_wake(task, task->tk_action);
	}

	/* Release all resources associated with the task */
	rpc_release_task(task);
out:
	current_restore_flags(pflags, PF_MEMALLOC);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that rpc_release_task() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async) {
		unsigned int pflags = memalloc_nofs_save();
		__rpc_execute(task);
		memalloc_nofs_restore(pflags);
	}
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing the warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = rpc_task_gfp_mask();

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE) {
		buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
		/* Reach for the mempool if dynamic allocation fails */
		if (!buf && RPC_IS_ASYNC(task))
			buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
	} else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
			xprt_get(task_setup_data->rpc_xprt));

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);
}

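/*
 * Allocate a struct rpc_task: try the slab cache with the usual task GFP
 * mask first, then fall back to the small emergency mempool so that task
 * allocation can still make progress under memory pressure.
 */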
static struct rpc_task *rpc_alloc_task(void)
{
	struct rpc_task *task;

	task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
	if (task)
		return task;
	return mempool_alloc(rpc_task_mempool, GFP_NOWAIT);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task *task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					     setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	return task;
}

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC)
		mempool_free(task, rpc_task_mempool);
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					   sizeof(struct rpc_task),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}