/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 *	      Information.
 *
 * Copyright (C) 2007, 2008, 2014 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/t10-pi.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

typedef __be16 (csum_fn) (void *, unsigned int);

/* Guard tag flavor 1: the 16-bit CRC defined by the T10 specification */
static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}

/* Guard tag flavor 2: the cheaper IP checksum used by DIX-capable HBAs */
static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
	return (__force __be16)ip_compute_csum(data, len);
}

/*
 * Type 1 and Type 2 protection use the same format: 16-bit guard tag,
 * 16-bit app tag, 32-bit reference tag. Type 3 does not define the ref
 * tag.
 */
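
/*
 * For reference, the 8-byte tuple manipulated below is struct
 * t10_pi_tuple from <linux/t10-pi.h>:
 *
 *	struct t10_pi_tuple {
 *		__be16 guard_tag;	// checksum over one data interval
 *		__be16 app_tag;		// application-specific
 *		__be32 ref_tag;		// Type 1/2: low 32 bits of the LBA
 *	};
 */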
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
				    csum_fn *fn, unsigned int type)
{
	unsigned int i;

	/* Walk the data buffer one protection interval at a time */
	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;

		pi->guard_tag = fn(iter->data_buf, iter->interval);
		pi->app_tag = 0;

		if (type == 1)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += sizeof(struct t10_pi_tuple);
		iter->seed++;
	}

	return BLK_STS_OK;
}

static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
				  csum_fn *fn, unsigned int type)
{
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;
		__be16 csum;

		switch (type) {
		case 1:
		case 2:
			/* An app tag of all ones disables checking */
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
				       iter->disk_name,
				       (unsigned long long)iter->seed,
				       be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
			break;
		case 3:
			/* Type 3 requires both tags at all ones to escape */
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
			break;
		}

		csum = fn(iter->data_buf, iter->interval);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
			       iter->disk_name,
			       (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += sizeof(struct t10_pi_tuple);
		iter->seed++;
	}

	return BLK_STS_OK;
}

/*
 * Thin wrappers binding each protection type to a guard tag flavor so
 * they can serve as blk_integrity_profile callbacks.
 */
static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, 1);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, 1);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, 1);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, 1);
}

static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, 3);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, 3);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, 3);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, 3);
}

const struct blk_integrity_profile t10_pi_type1_crc = {
	.name = "T10-DIF-TYPE1-CRC",
	.generate_fn = t10_pi_type1_generate_crc,
	.verify_fn = t10_pi_type1_verify_crc,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
	.name = "T10-DIF-TYPE1-IP",
	.generate_fn = t10_pi_type1_generate_ip,
	.verify_fn = t10_pi_type1_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
	.name = "T10-DIF-TYPE3-CRC",
	.generate_fn = t10_pi_type3_generate_crc,
	.verify_fn = t10_pi_type3_verify_crc,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
	.name = "T10-DIF-TYPE3-IP",
	.generate_fn = t10_pi_type3_generate_ip,
	.verify_fn = t10_pi_type3_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
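
/*
 * Illustrative sketch (not taken from this file): a driver advertises
 * one of the profiles above by registering it with the block layer.
 * The field values below are assumptions for a Type 1 device with
 * 512-byte protection intervals:
 *
 *	struct blk_integrity bi = {
 *		.profile	= &t10_pi_type1_crc,
 *		.tuple_size	= sizeof(struct t10_pi_tuple),
 *		.tag_size	= 0,
 *		.interval_exp	= 9,	// log2(512)
 *	};
 *
 *	blk_integrity_register(disk, &bi);
 */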

/**
 * t10_pi_prepare - prepare PI prior to submitting request to device
 * @rq: request with PI that should be prepared
 * @protection_type: PI type (Type 1/Type 2/Type 3)
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer and is used to seed the
 * reference tags. Due to partitioning, MD/DM cloning, etc. the actual
 * physical start sector is likely to be different, so remap the
 * protection information to match the physical LBA. For example, a bio
 * targeting sector 0 of a partition that begins at LBA 2048 carries
 * ref tags seeded from 0, while the device expects them to start at
 * 2048.
 *
 * Type 3 does not have a reference tag so no remapping is required.
 */
void t10_pi_prepare(struct request *rq, u8 protection_type)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	if (protection_type == T10_PI_TYPE3_PROTECTION)
		return;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			void *p, *pmap;
			unsigned int j;

			pmap = kmap_atomic(iv.bv_page);
			p = pmap + iv.bv_offset;
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}

			kunmap_atomic(pmap);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}
EXPORT_SYMBOL(t10_pi_prepare);
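
/*
 * Illustrative caller sketch (assumed, not part of this file): a driver
 * that remaps LBAs, e.g. sd, would invoke this just before issuing the
 * request to the hardware. "dev" is a hypothetical driver context that
 * caches the negotiated protection type:
 *
 *	if (blk_integrity_rq(rq))
 *		t10_pi_prepare(rq, dev->protection_type);
 */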

/**
 * t10_pi_complete - prepare PI prior to returning request to the block layer
 * @rq: request with PI that should be prepared
 * @protection_type: PI type (Type 1/Type 2/Type 3)
 * @intervals: total elements to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer and is used to seed the
 * reference tags. Due to partitioning, MD/DM cloning, etc. the actual
 * physical start sector is likely to be different. Since the physical
 * start sector was submitted to the device, remap the reference tags
 * back to the virtual values expected by the block layer.
 *
 * Type 3 does not have a reference tag so no remapping is required.
 */
void t10_pi_complete(struct request *rq, u8 protection_type,
		     unsigned int intervals)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	if (protection_type == T10_PI_TYPE3_PROTECTION)
		return;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			void *p, *pmap;
			unsigned int j;

			pmap = kmap_atomic(iv.bv_page);
			p = pmap + iv.bv_offset;
			/* Only remap the intervals the device completed */
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}

			kunmap_atomic(pmap);
		}
	}
}
EXPORT_SYMBOL(t10_pi_complete);
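
/*
 * Illustrative caller sketch (assumed, not part of this file): on
 * completion the same hypothetical driver would undo the remapping for
 * the intervals that were actually transferred. The ">> 9" below
 * assumes 512-byte protection intervals; "bytes_done" and "dev" are
 * stand-ins for the driver's own state:
 *
 *	if (blk_integrity_rq(rq))
 *		t10_pi_complete(rq, dev->protection_type,
 *				bytes_done >> 9);
 */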