// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
struct xts_tfm_ctx {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct crypto_cipher_spawn tweak_spawn;
};

struct xts_request_ctx {
	le128 t;
	struct scatterlist *tail;
	struct scatterlist sg[2];
	struct skcipher_request subreq;
};

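/*
 * The supplied key is the concatenation Key1 || Key2.  xts_verify_key()
 * rejects odd-length keys and, in FIPS mode or when
 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS is set, keys whose two halves are equal.
 */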
static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the gf128mul_x_ble() calls again.
 */
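/*
 * gf128mul_x_ble() advances the tweak with the little-endian block
 * convention that XTS uses, so replaying the sequence T_0, T_1, ... on the
 * second pass costs only one shift-and-xor per block.
 */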
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
			 bool enc)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				/*
				 * Last full block of a CTS message: on
				 * decryption the tweak order is swapped, so
				 * save T_n for the final partial block and
				 * use T_(n+1) here; on encryption use T_n
				 * here and leave T_(n+1) for xts_cts_final().
				 */
				if (!enc) {
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);
				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, false, enc);
}

static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, true, enc);
}

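/*
 * Async completion for xts_cts_final(): apply the output XOR with the
 * final tweak to the block that the child cipher just processed.
 */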
static void xts_cts_done(void *data, int err)
{
	struct skcipher_request *req = data;
	le128 b;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}

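/*
 * Ciphertext stealing for the final partial block: the tail of the last
 * full block's output becomes the stolen ciphertext, while the partial
 * input, padded with the remaining output bytes, is run through the
 * child cipher one more time under the final tweak.
 */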
static int xts_cts_final(struct skcipher_request *req,
			 int (*crypt)(struct skcipher_request *req))
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	/* b[0] = partial input padded with the last full output block,
	 * b[1] = the last full output block (the stolen ciphertext) */
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	b[1] = b[0];
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
				      req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	/* synchronous completion: apply the output tweak XOR here */
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}

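/*
 * Async completions for the main pass: once the child ecb request
 * finishes, run the second tweak pass and, if the length calls for
 * ciphertext stealing, chain into xts_cts_final().
 */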
static void xts_encrypt_done(void *data, int err)
{
	struct skcipher_request *req = data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
		err = xts_xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS || err == -EBUSY)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static void xts_decrypt_done(void *data, int err)
{
	struct skcipher_request *req = data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
		err = xts_xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS || err == -EBUSY)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

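/*
 * Common setup for both directions: XTS needs at least one full block,
 * the ecb subrequest covers only the full-block portion of the message,
 * and the initial tweak T_0 is computed by encrypting the IV with the
 * tweak cipher (Key2).
 */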
static int xts_init_crypt(struct skcipher_request *req,
			  crypto_completion_t compl)
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

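/*
 * The main pass is xor-ecb-xor: the first tweak pass writes the masked
 * input into req->dst, the child ecb transform processes it in place,
 * and the second pass applies the tweaks again.  A trailing partial
 * block, if any, is then handled by xts_cts_final().
 */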
static int xts_encrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_encrypt_done) ?:
	      xts_xor_tweak_pre(req, true) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      xts_xor_tweak_post(req, true);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_decrypt_done) ?:
	      xts_xor_tweak_pre(req, false) ?:
	      crypto_skcipher_decrypt(subreq) ?:
	      xts_xor_tweak_post(req, false);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_decrypt);
}

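/*
 * Instantiate the two child transforms from the spawns set up in
 * xts_create(), and size the request context so that our own state and
 * the child's subrequest both fit.
 */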
static int xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_spawn_cipher(&ictx->tweak_spawn);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct xts_request_ctx));

	return 0;
}

static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void xts_free_instance(struct skcipher_instance *inst)
{
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->spawn);
	crypto_drop_cipher(&ictx->tweak_spawn);
	kfree(inst);
}

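/*
 * Template instantiation: grab the named skcipher (retrying with an
 * "ecb(...)" wrapper if the bare name does not resolve), check that it
 * is a 16-byte-blocksize, IV-less transform, derive the "xts(...)"
 * algorithm name from the inner cipher, and grab that cipher once more
 * for the tweak.
 */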
static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_alg_common *alg;
	char name[CRYPTO_MAX_ALG_NAME];
	struct skcipher_instance *inst;
	struct xts_instance_ctx *ctx;
	const char *cipher_name;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn,
					   skcipher_crypto_instance(inst),
					   name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_free_inst;

	if (alg->ivsize)
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		int len;

		len = strscpy(name, cipher_name + 4, sizeof(name));
		if (len < 2)
			goto err_free_inst;

		if (name[len - 1] != ')')
			goto err_free_inst;

		name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	err = crypto_grab_cipher(&ctx->tweak_spawn,
				 skcipher_crypto_instance(inst), name, 0, mask);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = alg->min_keysize * 2;
	inst->alg.max_keysize = alg->max_keysize * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);

	inst->alg.init = xts_init_tfm;
	inst->alg.exit = xts_exit_tfm;

	inst->alg.setkey = xts_setkey;
	inst->alg.encrypt = xts_encrypt;
	inst->alg.decrypt = xts_decrypt;

	inst->free = xts_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		xts_free_instance(inst);
	}
	return err;
}

static struct crypto_template xts_tmpl = {
	.name = "xts",
	.create = xts_create,
	.module = THIS_MODULE,
};

static int __init xts_module_init(void)
{
	return crypto_register_template(&xts_tmpl);
}

static void __exit xts_module_exit(void)
{
	crypto_unregister_template(&xts_tmpl);
}

subsys_initcall(xts_module_init);
module_exit(xts_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_SOFTDEP("pre: ecb");
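
/*
 * Illustrative usage sketch (not part of this file, error handling mostly
 * elided): a kernel consumer would typically drive an instance of this
 * template through the skcipher API roughly as follows, here assuming
 * AES-128 halves (a 32-byte XTS key) and caller-provided scatterlists
 * src/dst of len bytes:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	u8 key[32];		// Key1 || Key2
 *	u8 iv[16];		// e.g. sector number, little endian
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */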