/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES 256

#define bio_prio(bio) (bio)->bi_ioprio
#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter) \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter) \
        bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter) \
        bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter) \
        bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio) \
        ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)
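
/*
 * Example (illustrative only, not part of this header): a simplified
 * driver-side bounds check built on bio_sectors()/bio_end_sector(). It
 * compares against the whole-disk capacity and ignores partition offsets;
 * the surrounding driver code is an assumption.
 *
 *        if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
 *                pr_warn("bio of %u sectors runs past end of device\n",
 *                        bio_sectors(bio));
 *                bio_io_error(bio);
 *                return;
 *        }
 */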

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
        (op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio &&
            bio->bi_iter.bi_size &&
            bio_op(bio) != REQ_OP_DISCARD &&
            bio_op(bio) != REQ_OP_SECURE_ERASE &&
            bio_op(bio) != REQ_OP_WRITE_ZEROES)
                return true;

        return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
        return bio_op(bio) == REQ_OP_DISCARD ||
               bio_op(bio) == REQ_OP_SECURE_ERASE ||
               bio_op(bio) == REQ_OP_WRITE_SAME ||
               bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
        if (bio->bi_opf & REQ_NOMERGE_FLAGS)
                return false;

        return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
        if (bio_has_data(bio))
                return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
                return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}

static inline bool bio_full(struct bio *bio)
{
        return bio->bi_vcnt >= bio->bi_max_vecs;
}

#define mp_bvec_for_each_segment(bv, bvl, i, iter_all) \
        for (bv = bvec_init_iter_all(&iter_all); \
             (iter_all.done < (bvl)->bv_len) && \
             (mp_bvec_next_segment((bvl), &iter_all), 1); \
             iter_all.done += bv->bv_len, i += 1)

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i, iter_all) \
        for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++) \
                mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all)

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
                                    unsigned bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
                /* TODO: It is reasonable to complete bio with error here. */
}

#define __bio_for_each_segment(bvl, bio, iter, start) \
        for (iter = (start); \
             (iter).bi_size && \
             ((bvl = bio_iter_iovec((bio), (iter))), 1); \
             bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter) \
        __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start) \
        for (iter = (start); \
             (iter).bi_size && \
             ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
             bio_advance_iter((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter) \
        __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
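
/*
 * Example (illustrative sketch): walking the data of a bio one single-page
 * segment at a time with bio_for_each_segment(). The helper name is
 * hypothetical; only the iterator usage matters here.
 *
 *        static unsigned int my_bio_count_bytes(struct bio *bio)
 *        {
 *                struct bio_vec bv;
 *                struct bvec_iter iter;
 *                unsigned int bytes = 0;
 *
 *                bio_for_each_segment(bv, bio, iter)
 *                        bytes += bv.bv_len;
 *
 *                return bytes;
 *        }
 *
 * bio_for_each_bvec() iterates the same data but yields multi-page bvecs,
 * which is mainly useful when mapping a request for DMA.
 */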

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
        unsigned segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        /*
         * We special case discard/write same/write zeroes, because they
         * interpret bi_size differently:
         */

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        default:
                break;
        }

        bio_for_each_segment(bv, bio, iter)
                segs++;

        return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 *        bio_get(bio);
 *        submit_bio(bio);
 *        if (bio->bi_flags ...)
 *                do_something
 *        bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio() returns,
 * and the bio would already have been freed by the time the
 * if (bio->bi_flags ...) test runs.
 */
static inline void bio_get(struct bio *bio)
{
        bio->bi_flags |= (1 << BIO_REFFED);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
        if (count != 1) {
                bio->bi_flags |= (1 << BIO_REFFED);
                smp_mb__before_atomic();
        }
        atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
        return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
        *bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
        struct bvec_iter iter = bio->bi_iter;
        int idx;

        if (unlikely(!bio_multiple_segments(bio))) {
                *bv = bio_iovec(bio);
                return;
        }

        bio_advance_iter(bio, &iter, iter.bi_size);

        if (!iter.bi_bvec_done)
                idx = iter.bi_idx - 1;
        else    /* in the middle of bvec */
                idx = iter.bi_idx;

        *bv = bio->bi_io_vec[idx];

        /*
         * iter.bi_bvec_done records actual length of the last bvec
         * if this bio ends in the middle of one io vector
         */
        if (iter.bi_bvec_done)
                bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
        return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
        BIP_BLOCK_INTEGRITY = 1 << 0,   /* block layer owns integrity data */
        BIP_MAPPED_INTEGRITY = 1 << 1,  /* ref tag has been remapped */
        BIP_CTRL_NOCHECK = 1 << 2,      /* disable HBA integrity checking */
        BIP_DISK_NOCHECK = 1 << 3,      /* disable disk integrity checking */
        BIP_IP_CHECKSUM = 1 << 4,       /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
        struct bio *bip_bio;            /* parent bio */

        struct bvec_iter bip_iter;

        unsigned short bip_slab;        /* slab the bip came from */
        unsigned short bip_vcnt;        /* # of integrity bio_vecs */
        unsigned short bip_max_vcnt;    /* integrity bio_vec slots */
        unsigned short bip_flags;       /* control flags */

        struct bvec_iter bio_iter;      /* for rewinding parent bio */

        struct work_struct bip_work;    /* I/O completion */

        struct bio_vec *bip_vec;
        struct bio_vec bip_inline_vecs[0];      /* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
        if (bio->bi_opf & REQ_INTEGRITY)
                return bio->bi_integrity;

        return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);

        if (bip)
                return bip->bip_flags & flag;

        return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
        return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
                                sector_t seed)
{
        bip->bip_iter.bi_sector = seed;
}
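
/*
 * Example (hypothetical driver snippet, not from this header): updating the
 * integrity seed after a bio has been retargeted to a new start sector, so
 * protection-information verification still uses the right reference.
 * new_sector is an assumption about the caller.
 *
 *        struct bio_integrity_payload *bip = bio_integrity(bio);
 *
 *        if (bip)
 *                bip_set_seed(bip, new_sector);
 */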

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:        bio to split
 * @sectors:    number of sectors to split from the front of @bio
 * @gfp:        gfp mask
 * @bs:         bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}
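
/*
 * Example (a sketch of the pattern used by stacking drivers; my_bio_set,
 * max_sectors and my_submit_chunk() are hypothetical): carving a bio into
 * chunks of at most max_sectors and chaining each piece back to the
 * original, so the original only completes once every piece has completed.
 *
 *        struct bio *split;
 *
 *        do {
 *                split = bio_next_split(bio, max_sectors, GFP_NOIO,
 *                                       &my_bio_set);
 *                if (split != bio)
 *                        bio_chain(split, bio);
 *                my_submit_chunk(split);
 *        } while (split != bio);
 */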

enum {
        BIOSET_NEED_BVECS = BIT(0),
        BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
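
/*
 * Example (illustrative sketch of a synchronous single-page read; bdev,
 * page and sector are assumed to exist in the caller):
 *
 *        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *        int ret;
 *
 *        bio_set_dev(bio, bdev);
 *        bio->bi_iter.bi_sector = sector;
 *        bio->bi_opf = REQ_OP_READ;
 *        bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *        ret = submit_bio_wait(bio);
 *        bio_put(bio);
 */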

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
        bio->bi_status = BLK_STS_IOERR;
        bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
        bio->bi_status = BLK_STS_AGAIN;
        bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
                     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
                          unsigned int len, unsigned int off, bool same_page);
void __bio_add_page(struct bio *bio, struct page *page,
                    unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
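
/*
 * Example (a sketch; pages[], nr_pages and the bio itself are assumed to be
 * set up by the caller): filling a bio from an array of pages, stopping when
 * the bvec table is full or bio_add_page() cannot take a whole page.
 *
 *        int i;
 *
 *        for (i = 0; i < nr_pages; i++) {
 *                if (bio_full(bio))
 *                        break;
 *                if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *                        break;
 *        }
 */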
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
                                    struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
                                 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(struct request_queue *q, int op,
                           unsigned long sectors, struct hd_struct *part);
void generic_end_io_acct(struct request_queue *q, int op,
                         struct hd_struct *part,
                         unsigned long start_time);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
                               struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
                                     struct rq_map_data *,
                                     struct iov_iter *,
                                     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
        zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)                          \
do {                                                    \
        if ((bio)->bi_disk != (bdev)->bd_disk)          \
                bio_clear_flag(bio, BIO_THROTTLED);     \
        (bio)->bi_disk = (bdev)->bd_disk;               \
        (bio)->bi_partno = (bdev)->bd_partno;           \
        bio_associate_blkg(bio);                        \
} while (0)

#define bio_copy_dev(dst, src)                          \
do {                                                    \
        (dst)->bi_disk = (src)->bi_disk;                \
        (dst)->bi_partno = (src)->bi_partno;            \
        bio_clone_blkg_association(dst, src);           \
} while (0)

#define bio_dev(bio) \
        disk_devt((bio)->bi_disk)

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
static inline void bio_associate_blkg_from_page(struct bio *bio,
                                                struct page *page) { }
#endif

#ifdef CONFIG_BLK_CGROUP
void bio_disassociate_blkg(struct bio *bio);
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
                                 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
static inline void bio_disassociate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
                                               struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
                                              struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        unsigned long addr;

        /*
         * might not be a highmem page, but the preempt/irq count
         * balancing is a lot nicer this way
         */
        local_irq_save(*flags);
        addr = (unsigned long) kmap_atomic(bvec->bv_page);

        BUG_ON(addr & ~PAGE_MASK);

        return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

        kunmap_atomic((void *) ptr);
        local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        *flags = 0;
}
#endif
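
/*
 * Example (illustrative sketch; buf is assumed to point at at least
 * bvec->bv_len bytes of destination memory): copying the contents of one
 * bio_vec out of a possibly-highmem page. Interrupts stay disabled between
 * the map and the unmap.
 *
 *        unsigned long flags;
 *        char *p = bvec_kmap_irq(bvec, &flags);
 *
 *        memcpy(buf, p, bvec->bv_len);
 *        bvec_kunmap_irq(p, &flags);
 */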

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
        return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST { NULL, NULL }

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;

        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->tail)
                bl->tail->bi_next = bl2->head;
        else
                bl->head = bl2->head;

        bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
                                       struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->head)
                bl2->tail->bi_next = bl->head;
        else
                bl->tail = bl2->tail;

        bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}
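
/*
 * Example (illustrative sketch): deferring bios to a FIFO and draining it
 * later, for instance from a worker. The list variable and the bios queued
 * onto it are hypothetical; bio_list_add() queues at the tail and
 * bio_list_pop() removes from the head.
 *
 *        struct bio_list deferred;
 *        struct bio *bio;
 *
 *        bio_list_init(&deferred);
 *        bio_list_add(&deferred, some_bio);
 *        ...
 *        while ((bio = bio_list_pop(&deferred)))
 *                generic_make_request(bio);
 */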

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
        bio_set_flag(bio, BIO_CHAIN);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        mempool_t bio_pool;
        mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t bio_integrity_pool;
        mempool_t bvec_integrity_pool;
#endif

        /*
         * Deadlock avoidance for stacking block drivers: see comments in
         * bio_alloc_bioset() for details
         */
        spinlock_t rescue_lock;
        struct bio_list rescue_list;
        struct work_struct rescue_work;
        struct workqueue_struct *rescue_workqueue;
};

struct biovec_slab {
        int nr_vecs;
        char *name;
        struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
        return bs->bio_slab != NULL;
}
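
/*
 * Example (illustrative sketch): a driver keeping a private bio_set so its
 * allocations do not depend on fs_bio_set. The names are hypothetical; the
 * front_pad argument of 0 means no per-bio private area is reserved.
 *
 *        static struct bio_set my_bio_set;
 *
 *        static int __init my_driver_init(void)
 *        {
 *                return bioset_init(&my_bio_set, BIO_POOL_SIZE, 0,
 *                                   BIOSET_NEED_BVECS);
 *        }
 *
 *        // later:
 *        struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
 *        ...
 *        bioset_exit(&my_bio_set);
 */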

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter) \
        for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
        for_each_bio(_bio) \
                bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
        return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
        return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
        return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
        return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
                                      gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
{
        return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
        return;
}

static inline void bio_integrity_init(void)
{
        return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
                                        unsigned int nr)
{
        return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
                                         unsigned int len, unsigned int offset)
{
        return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
        bio->bi_opf |= REQ_HIPRI;
        if (!is_sync_kiocb(kiocb))
                bio->bi_opf |= REQ_NOWAIT;
}
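
/*
 * Example (illustrative sketch of a filesystem direct-IO submission path;
 * the iocb handling and surrounding setup are assumptions about the caller):
 *
 *        if (iocb->ki_flags & IOCB_HIPRI)
 *                bio_set_polled(bio, iocb);
 *
 *        qc = submit_bio(bio);
 *
 * For sync polled IO the caller then polls for completion of the returned
 * cookie (e.g. via blk_poll()) instead of sleeping on an irq-driven wakeup.
 */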

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */