/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * Requests are present on both sort_list[] and fifo_list[].
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * Next request in sector-sorted order; the read, the write, or both
	 * may be NULL.
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

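	/*
	 * Note on locking (derived from the code below): @lock serializes
	 * access to the fifo/sort lists and to @dispatch, while @zone_lock
	 * keeps zone write unlocking in dd_finish_request() from racing with
	 * the zone checks in deadline_fifo_request() and
	 * deadline_next_request().
	 */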
	spinlock_t lock;
	spinlock_t zone_lock;
	struct list_head dispatch;
};

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(dd, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

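/*
 * Called via the .request_merged elevator op after a bio has been merged
 * into @req. A front merge changes the request's start sector, so the
 * request has to be repositioned in the sort tree.
 */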
static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

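/*
 * Called via the .requests_merged elevator op when two requests have been
 * merged: @next has been merged into @req and is removed from the scheduler
 * here; the caller frees it.
 */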
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	/*
	 * if next expires before req, assign its expire time to req and
	 * move req into next's position in the fifo (next will be deleted)
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * Take rq off the fifo and sort lists in preparation for dispatch, and
 * remember the next request in sort order for its data direction.
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	if (list_empty(&dd->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	rq = dd->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
	struct request *rq, *next_rq;
	bool reads, writes;
	int data_dir;

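	/*
	 * Requests that were inserted at the head of the queue, or that are
	 * passthrough requests, sit on the dd->dispatch list and are always
	 * handed out first, bypassing the fifo/sort machinery below.
	 */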
	if (!list_empty(&dd->dispatch)) {
		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	reads = !list_empty(&dd->fifo_list[READ]);
	writes = !list_empty(&dd->fifo_list[WRITE]);

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, WRITE);
	if (!rq)
		rq = deadline_next_request(dd, READ);

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (deadline_fifo_request(dd, WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads, or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, data_dir);
	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected request: account it towards the current batch.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);
done:
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 *
 * For a zoned block device, __dd_dispatch_request() may return NULL
 * if all the queued write requests are directed at zones that are already
 * locked due to on-going write requests. In this case, make sure to mark
 * the queue as needing a restart to ensure that the queue is run again
 * and the pending writes dispatched once the target zones for the ongoing
 * write requests are unlocked in dd_finish_request().
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	spin_lock(&dd->lock);
	rq = __dd_dispatch_request(dd);
	if (!rq && blk_queue_is_zoned(hctx->queue) &&
	    !list_empty(&dd->fifo_list[WRITE]))
		blk_mq_sched_mark_restart_hctx(hctx);
	spin_unlock(&dd->lock);

	return rq;
}

static void dd_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);
	INIT_LIST_HEAD(&dd->dispatch);

	q->elevator = eq;
	return 0;
}

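/*
 * Try a front merge: look up a queued request whose start sector matches
 * bio_end_sector(bio) in the sort tree for the bio's data direction.
 */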
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

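/*
 * Called from blk-mq before allocating a new request for @bio, to try
 * merging the bio into a request already held by the scheduler. If the
 * merge empties an existing request, that request is freed here.
 */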
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

487
488/*
489 * add rq to rbtree and fifo
490 */
491static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
492 bool at_head)
493{
494 struct request_queue *q = hctx->queue;
495 struct deadline_data *dd = q->elevator->elevator_data;
496 const int data_dir = rq_data_dir(rq);
497
498 /*
499 * This may be a requeue of a write request that has locked its
500 * target zone. If it is the case, this releases the zone lock.
501 */
502 blk_req_zone_write_unlock(rq);
503
504 if (blk_mq_sched_try_insert_merge(q, rq))
505 return;
506
507 blk_mq_sched_request_inserted(rq);
508
509 if (at_head || blk_rq_is_passthrough(rq)) {
510 if (at_head)
511 list_add(&rq->queuelist, &dd->dispatch);
512 else
513 list_add_tail(&rq->queuelist, &dd->dispatch);
514 } else {
515 deadline_add_rq_rb(dd, rq);
516
517 if (rq_mergeable(rq)) {
518 elv_rqhash_add(q, rq);
519 if (!q->last_merge)
520 q->last_merge = rq;
521 }
522
523 /*
524 * set expire time and add to fifo list
525 */
526 rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
527 list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
528 }
529}
530
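/*
 * Insert a batch of requests: dd->lock is taken once around the loop so the
 * whole batch is added under a single lock round trip.
 */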
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that the
 * .finish_request method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq, struct bio *bio)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (blk_queue_is_zoned(q)) {
		struct deadline_data *dd = q->elevator->elevator_data;
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

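/*
 * Report whether the scheduler still holds requests that could be
 * dispatched. Called without dd->lock held, hence list_empty_careful().
 */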
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
 */
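/*
 * The attributes defined below show up under the queue's iosched directory
 * in sysfs. Illustrative shell session (device name is just an example):
 *
 *   # cat /sys/block/sda/queue/iosched/fifo_batch
 *   16
 *   # echo 32 > /sys/block/sda/queue/iosched/fifo_batch
 *
 * read_expire and write_expire are shown and stored in milliseconds.
 */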
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
	struct deadline_data *dd = e->elevator_data; \
	int __data = __VAR; \
	if (__CONV) \
		__data = jiffies_to_msecs(__data); \
	return deadline_var_show(__data, (page)); \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
	struct deadline_data *dd = e->elevator_data; \
	int __data; \
	deadline_var_store(&__data, (page)); \
	if (__data < (MIN)) \
		__data = (MIN); \
	else if (__data > (MAX)) \
		__data = (MAX); \
	if (__CONV) \
		*(__PTR) = msecs_to_jiffies(__data); \
	else \
		*(__PTR) = __data; \
	return count; \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
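/*
 * Debugfs attributes: with CONFIG_BLK_DEBUG_FS enabled, blk-mq exposes these
 * read-only views (fifo lists, next_rq, batching, starved, dispatch) under
 * the per-device scheduler directory in debugfs, typically
 * /sys/kernel/debug/block/<dev>/sched/.
 */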
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name) \
static void *deadline_##name##_fifo_start(struct seq_file *m, \
					  loff_t *pos) \
	__acquires(&dd->lock) \
{ \
	struct request_queue *q = m->private; \
	struct deadline_data *dd = q->elevator->elevator_data; \
 \
	spin_lock(&dd->lock); \
	return seq_list_start(&dd->fifo_list[ddir], *pos); \
} \
 \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
					 loff_t *pos) \
{ \
	struct request_queue *q = m->private; \
	struct deadline_data *dd = q->elevator->elevator_data; \
 \
	return seq_list_next(v, &dd->fifo_list[ddir], pos); \
} \
 \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
	__releases(&dd->lock) \
{ \
	struct request_queue *q = m->private; \
	struct deadline_data *dd = q->elevator->elevator_data; \
 \
	spin_unlock(&dd->lock); \
} \
 \
static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
	.start = deadline_##name##_fifo_start, \
	.next = deadline_##name##_fifo_next, \
	.stop = deadline_##name##_fifo_stop, \
	.show = blk_mq_debugfs_rq_show, \
}; \
 \
static int deadline_##name##_next_rq_show(void *data, \
					  struct seq_file *m) \
{ \
	struct request_queue *q = data; \
	struct deadline_data *dd = q->elevator->elevator_data; \
	struct request *rq = dd->next_rq[ddir]; \
 \
	if (rq) \
		__blk_mq_debugfs_rq_show(m, rq); \
	return 0; \
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
	__releases(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
	.start = deadline_dispatch_start,
	.next = deadline_dispatch_next,
	.stop = deadline_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name) \
	{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read),
	DEADLINE_QUEUE_DDIR_ATTRS(write),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.insert_requests = dd_insert_requests,
		.dispatch_request = dd_dispatch_request,
		.prepare_request = dd_prepare_request,
		.finish_request = dd_finish_request,
		.next_request = elv_rb_latter_request,
		.former_request = elv_rb_former_request,
		.bio_merge = dd_bio_merge,
		.request_merge = dd_request_merge,
		.requests_merged = dd_merged_requests,
		.request_merged = dd_request_merged,
		.has_work = dd_has_work,
		.init_sched = dd_init_queue,
		.exit_sched = dd_exit_queue,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");