/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_IO_READ_H
#define _BCACHEFS_IO_READ_H

#include "bkey_buf.h"

struct bch_read_bio {
	struct bch_fs		*c;
	u64			start_time;
	u64			submit_time;

	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If we didn't have to split, we have to save and restore the original
	 * bi_end_io - @split below indicates which:
	 */
	union {
		struct bch_read_bio	*parent;
		bio_end_io_t		*end_io;
	};

	/*
	 * Saved copy of bio->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	bvec_iter;

	unsigned		offset_into_extent;

	u16			flags;
	union {
		struct {
			u16	bounce:1,
				split:1,
				kmalloc:1,
				have_ioref:1,
				narrow_crcs:1,
				hole:1,
				retry:2,
				context:2;
		};
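		/*
		 * @_state overlays the bitfield above so that all of the
		 * per-read state bits can be cleared or tested as one word
		 * (see rbio_init() and the BUG_ON() in bch2_read() below):
		 */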
		u16	_state;
	};

	struct bch_devs_list	devs_have;

	struct extent_ptr_decoded pick;

	/*
	 * pos we read from - different from data_pos for indirect extents:
	 */
	u32			subvol;
	struct bpos		read_pos;

	/*
	 * start pos of data we read (may not be pos of data we want) - for
	 * promote, narrow extents paths:
	 */
	enum btree_id		data_btree;
	struct bpos		data_pos;
	struct bversion		version;

	struct promote_op	*promote;

	struct bch_io_opts	opts;

	struct work_struct	work;

	struct bio		bio;
};

#define to_rbio(_bio)	container_of((_bio), struct bch_read_bio, bio)
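
/*
 * Illustrative sketch, not part of this interface: since @bio is the last
 * member, a completed struct bio can be mapped back to its containing read
 * bio with to_rbio(), and @split tells which union member is live - the
 * parent rbio if the read was split, the saved bi_end_io otherwise. The
 * handler name here is hypothetical:
 *
 *	static void example_read_endio(struct bio *bio)
 *	{
 *		struct bch_read_bio *rbio = to_rbio(bio);
 *
 *		if (rbio->split)
 *			bio_endio(&rbio->parent->bio);
 *		else
 *			rbio->end_io(&rbio->bio);
 *	}
 */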

struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
				struct bkey_buf *);

static inline int bch2_read_indirect_extent(struct btree_trans *trans,
					    enum btree_id *data_btree,
					    unsigned *offset_into_extent,
					    struct bkey_buf *k)
{
	if (k->k->k.type != KEY_TYPE_reflink_p)
		return 0;

	*data_btree = BTREE_ID_reflink;
	return __bch2_read_indirect_extent(trans, offset_into_extent, k);
}
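
/*
 * Minimal usage sketch, assuming the caller has already looked up an extent
 * key into @k (variable names here are hypothetical):
 *
 *	enum btree_id data_btree = BTREE_ID_extents;
 *
 *	ret = bch2_read_indirect_extent(trans, &data_btree,
 *					&offset_into_extent, &k);
 *	if (ret)
 *		goto err;
 *
 * On return, if @k was a reflink pointer, @data_btree has been redirected to
 * BTREE_ID_reflink and @k holds the resolved indirect extent.
 */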

enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,
	BCH_READ_LAST_FRAGMENT		= 1 << 4,

	/* internal: */
	BCH_READ_MUST_BOUNCE		= 1 << 5,
	BCH_READ_MUST_CLONE		= 1 << 6,
	BCH_READ_IN_RETRY		= 1 << 7,
};
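
/*
 * Callers OR the public flags together - bch2_read() below passes a typical
 * combination for user-mapped reads. The flags marked internal are managed by
 * the read path itself.
 */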

int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);

static inline void bch2_read_extent(struct btree_trans *trans,
			struct bch_read_bio *rbio, struct bpos read_pos,
			enum btree_id data_btree, struct bkey_s_c k,
			unsigned offset_into_extent, unsigned flags)
{
	__bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
			   data_btree, k, offset_into_extent, NULL, flags);
}
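
/*
 * bch2_read_extent() covers the common case: read into the rbio's own bio at
 * its current iterator, with no failure list. Callers that split reads or
 * track retries supply an explicit bvec_iter and struct bch_io_failures to
 * __bch2_read_extent() instead (compare __bch2_read() below, which takes the
 * same pair).
 */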

void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
		 subvol_inum, struct bch_io_failures *, unsigned flags);

static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
			     subvol_inum inum)
{
	struct bch_io_failures failed = { .nr = 0 };

	BUG_ON(rbio->_state);

	rbio->c		= c;
	rbio->start_time = local_clock();
	rbio->subvol	= inum.subvol;

	__bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
		    BCH_READ_RETRY_IF_STALE|
		    BCH_READ_MAY_PROMOTE|
		    BCH_READ_USER_MAPPED);
}

static inline struct bch_read_bio *rbio_init(struct bio *bio,
					     struct bch_io_opts opts)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	rbio->_state	= 0;
	rbio->promote	= NULL;
	rbio->opts	= opts;
	return rbio;
}
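
/*
 * Putting it together - a sketch of a caller issuing a user read, assuming
 * @bio is embedded in a struct bch_read_bio allocated from a suitable bioset
 * (names below are hypothetical):
 *
 *	struct bch_read_bio *rbio = rbio_init(bio, io_opts);
 *
 *	bch2_read(c, rbio, inum);
 *
 * rbio_init() must run first: bch2_read() asserts that no per-read state bits
 * are set (BUG_ON(rbio->_state)) before initializing and submitting.
 */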

void bch2_fs_io_read_exit(struct bch_fs *);
int bch2_fs_io_read_init(struct bch_fs *);

#endif /* _BCACHEFS_IO_READ_H */