// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

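/*
 * Illustrative sketch (not part of this file): a hypothetical stacking
 * driver would typically reset its limits with blk_set_stacking_limits()
 * and then fold in every component device, for example via
 * queue_limits_stack_bdev() below. The "my_target" structure and its
 * device list are assumptions used only for illustration.
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	list_for_each_entry(dev, &my_target->devices, list)
 *		queue_limits_stack_bdev(&lim, dev->bdev, 0, "my_target");
 */
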
static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!lim->zoned) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own value instead of relying on
	 * this one.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account. The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * Random default for the maximum number of segments. Drivers should
	 * not rely on this and should set their own value.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * Stacking devices may have both a virtual boundary and a max segment
	 * size limit, so allow this setting now, and long-term the two
	 * might need to move out of the stacking limits since we have immutable
	 * bvecs and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (!lim->virt_boundary_mask) {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override. Just like for the I/O size
		 * we require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits. Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->misaligned = 0;
	}

	return blk_validate_zoned_limits(lim);
}

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which could be no limit in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
	__releases(q->limits_lock)
{
	int error = blk_validate_limits(lim);

	if (!error) {
		q->limits = *lim;
		if (q->disk)
			blk_apply_bdi_limits(q->disk->bdi, lim);
	}
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

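/*
 * Illustrative sketch (not part of this file): a driver that wants to change
 * a limit at runtime starts an atomic update (via queue_limits_start_update()
 * as referenced in the kerneldoc above), modifies the snapshot, and commits
 * it. "new_max" is a hypothetical value used only for illustration.
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int error;
 *
 *	lim.max_hw_sectors = new_max;
 *	error = queue_limits_commit_update(q, &lim);
 *	if (error)
 *		return error;
 */
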
/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
	q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests. max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests. This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
			limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);

	if (limits->max_user_sectors)
		max_sectors = min(max_sectors, limits->max_user_sectors);
	else
		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);

	max_sectors = round_down(max_sectors,
			limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	if (!q->disk)
		return;
	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

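/*
 * Worked example (illustrative, values assumed): a controller that can
 * transfer at most 1 MiB per request with 512-byte logical blocks would call
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 *
 * max_hw_sectors then becomes 2048, while max_sectors is additionally capped
 * by max_dev_sectors (if set), the user limit from
 * /sys/block/<device>/queue/max_sectors_kb, or BLK_DEF_MAX_SECTORS_CAP,
 * whichever applies and is smaller, as implemented above.
 */
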
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q: the request queue for the device
 * @chunk_sectors: chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	struct queue_limits *lim = &q->limits;

	lim->max_hw_discard_sectors = max_discard_sectors;
	lim->max_discard_sectors =
		min(max_discard_sectors, lim->max_user_discard_sectors);
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
 * @q: the request queue for the device
 * @max_sectors: maximum number of sectors to secure_erase
 **/
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors)
{
	q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q: the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q: the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Signal potential driver bugs that result in the max_zone_append_sectors
	 * limit ending up 0: a 0 argument, the chunk_sectors limit (zone size)
	 * not being set, or the max_hw_sectors limit not being set.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

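/*
 * Illustrative sketch (not part of this file): a hypothetical zoned driver
 * with 256 MiB zones (524288 sectors of 512 bytes) might mark the disk as
 * zoned, set the zone size via chunk_sectors, and then cap zone append.
 * The zone size and the call ordering are assumptions for illustration only.
 *
 *	disk_set_zoned(disk);
 *	blk_queue_chunk_sectors(q, 524288);
 *	blk_queue_max_zone_append_sectors(q, q->limits.max_hw_sectors);
 */
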
/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		pr_info("%s: set to minimum %u\n", __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		pr_info("%s: set to minimum %u\n", __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 *    This should be set to the lowest possible block size that the
 *    storage device can address. The default of 512 covers most
 *    hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->discard_granularity < limits->logical_block_size)
		limits->discard_granularity = limits->logical_block_size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

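/*
 * Illustrative example (values assumed): a 4K-native device would call
 *
 *	blk_queue_logical_block_size(q, 4096);
 *
 * which also bumps physical_block_size and io_min to at least 4096 and
 * rounds max_hw_sectors/max_sectors down to a multiple of 8 sectors, as
 * done by the helper above.
 */
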
/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 *    This should be set to the lowest possible sector size that the
 *    hardware can operate on without reverting to read-modify-write
 *    operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.discard_granularity < q->limits.physical_block_size)
		q->limits.discard_granularity = q->limits.physical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q: the request queue for the zoned device
 * @size: the zone write granularity size, in bytes
 *
 * Description:
 *    This should be set to the lowest possible size allowing writes to the
 *    sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
		unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *    Some devices are naturally misaligned to compensate for things like
 *    the legacy DOS partition table 63-sector offset. Low-level drivers
 *    should call this function for devices whose first sector is not
 *    naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void disk_update_readahead(struct gendisk *disk)
{
	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 *    Some devices have an internal block size bigger than the reported
 *    hardware sector size. This function can be used to signal the
 *    smallest I/O the device can perform without incurring a performance
 *    penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 *    Storage devices may report a granularity or preferred minimum I/O
 *    size which is the smallest request the device can perform without
 *    incurring a performance penalty. For disk drives this is often the
 *    physical block size. For RAID arrays it is often the stripe chunk
 *    size. A properly aligned multiple of minimum_io_size is the
 *    preferred request size for workloads where a high number of I/O
 *    operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: optimal request size in bytes
 *
 * Description:
 *    Storage devices may report an optimal I/O size, which is the
 *    device's preferred unit for sustained I/O. This is rarely reported
 *    for disk drives. For RAID arrays it is usually the stripe width or
 *    the internal track size. A properly aligned multiple of
 *    optimal_io_size is the preferred request size for workloads where
 *    sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 *    Storage devices may report an optimal I/O size, which is the
 *    device's preferred unit for sustained I/O. This is rarely reported
 *    for disk drives. For RAID arrays it is usually the stripe width or
 *    the internal track size. A properly aligned multiple of
 *    optimal_io_size is the preferred request size for workloads where
 *    sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	if (!q->disk)
		return;
	q->disk->bdi->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);

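/*
 * Worked example (illustrative, values assumed): with 4 KiB pages, a RAID
 * array reporting io_opt = 1 MiB gets
 *
 *	ra_pages = max(2 * 1048576 / 4096, VM_READAHEAD_PAGES) = max(512, VM_READAHEAD_PAGES)
 *
 * i.e. 2 MiB of readahead, matching the "read ahead at least twice the
 * optimal I/O size" rule in blk_apply_bdi_limits() above.
 */
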
static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

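/*
 * Worked example (illustrative, values assumed): for a device with 4096-byte
 * physical blocks, io_min = 4096 and alignment_offset = 0, a partition
 * starting at sector 63 gives granularity = 4096 and alignment =
 * (63 % 8) * 512 = 3584, so the helper returns (4096 + 0 - 3584) % 4096 = 512,
 * i.e. the next naturally aligned boundary lies 512 bytes past the partition
 * start.
 */
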
static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible. The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					 b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment. Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	if (!t->zoned) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

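/*
 * Illustrative sketch (not part of this file): a hypothetical MD/DM-style
 * driver stacks one component at a time and only cares whether the final
 * result is misaligned. "conf" and its device list are assumptions used
 * only for illustration.
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	list_for_each_entry(rdev, &conf->disks, same_set)
 *		if (blk_stack_limits(&lim, &bdev_get_queue(rdev->bdev)->limits,
 *				     get_start_sect(rdev->bdev)) < 0)
 *			pr_warn("component %pg is misaligned\n", rdev->bdev);
 */
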
/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		pr_info("%s: set to minimum %lx\n", __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE). Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

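/*
 * Illustrative example (assumption: a controller whose DMA descriptors each
 * address exactly one 4 KiB page, NVMe PRP style): such a driver would set
 *
 *	blk_queue_virt_boundary(q, 4096 - 1);
 *
 * so that bios whose vectors do not line up on 4 KiB boundaries are split
 * rather than merged across the gap, while max_segment_size is lifted to
 * UINT_MAX as explained in the comment above.
 */
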
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone. The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->limits.dma_alignment)
		q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q: the request queue for the device
 * @wc: write back cache on or off
 * @fua: device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc) {
		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	}
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);

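/*
 * Illustrative example: a driver for hardware with a volatile write cache
 * and native FUA support would call
 *
 *	blk_queue_write_cache(q, true, true);
 *
 * whereas a device that writes through to stable media would pass
 * (false, false), letting the block layer skip flush/FUA handling for it.
 */
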
/**
 * blk_queue_required_elevator_features - set the required elevator features for a queue
 * @q: the request queue for the target device
 * @features: required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
		unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q: the request queue for the device
 * @dev: the device pointer for dma
 *
 * Tell the block layer about merging the segments by dma map of @q.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

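/*
 * Illustrative sketch (not part of this file): a driver whose controller sits
 * behind an IOMMU that can merge mapped segments might call this from its
 * probe path with its DMA device. "pdev" is a hypothetical platform/PCI
 * device used only for illustration.
 *
 *	if (blk_queue_can_use_dma_map_merging(q, &pdev->dev))
 *		dev_dbg(&pdev->dev, "using IOMMU merge boundary for %s\n",
 *			disk->disk_name);
 */
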
/**
 * disk_set_zoned - indicate a zoned device
 * @disk: gendisk to configure
 */
void disk_set_zoned(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));

	/*
	 * Set the zone write granularity to the device logical block
	 * size by default. The driver can change this value if needed.
	 */
	q->limits.zoned = true;
	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);

