// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ce-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd.
 * Copyright (C) 2024 Google LLC
 *
 * Author: Ard Biesheuvel <ardb@kernel.org>
 */

#include <asm/neon.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

#include "aes-ce-setkey.h"

MODULE_IMPORT_NS(CRYPTO_INTERNAL);

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}

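/*
 * The transforms below are implemented by the accompanying assembly in
 * aes-ce-ccm-core.S. ce_aes_mac_update() returns the number of blocks it
 * did not process, which is nonzero only when it had to yield the NEON
 * unit; the en/decrypt routines also encrypt the final MAC when they are
 * passed a non-NULL final_iv (the counter 0 block).
 */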
asmlinkage u32 ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
				 int blocks, u8 dg[], int enc_before,
				 int enc_after);

asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[], u8 const final_iv[]);

asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[], u8 const final_iv[]);

static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);

	return ce_aes_expandkey(ctx, in_key, key_len);
}

static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
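	/*
	 * CCM only permits even tag sizes between 4 and 16 bytes; the upper
	 * bound is enforced via .maxauthsize before this callback runs.
	 */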
	if ((authsize & 1) || authsize < 4)
		return -EINVAL;
	return 0;
}

static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	__be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
	u32 l = req->iv[0] + 1;

	/* verify that CCM dimension 'L' is set correctly in the IV */
	if (l < 2 || l > 8)
		return -EINVAL;

	/*
	 * verify that msglen can in fact be represented in L bytes (for
	 * L >= 4, any u32 msglen fits)
	 */
	if (l < 4 && msglen >> (8 * l))
		return -EOVERFLOW;

	/*
	 * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
	 * uses a u32 type to represent msglen so the top 4 bytes are always 0.
	 */
	n[0] = 0;
	n[1] = cpu_to_be32(msglen);

	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);
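	/*
	 * maciv[] now holds B0 apart from the flag bits set below: the length
	 * octet and nonce from the IV, followed by the L-byte big-endian
	 * encoding of msglen in the last L bytes.
	 */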

	/*
	 * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
	 * - bits 0..2	: max # of bytes required to represent msglen, minus 1
	 *		  (already set by caller)
	 * - bits 3..5	: size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
	 * - bit 6	: indicates presence of authenticate-only data
	 */
	maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
	if (req->assoclen)
		maciv[0] |= 0x40;

	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
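	/* with the trailing L counter bytes zeroed, req->iv now holds A0 */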
	return 0;
}

static u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
				u32 macp, u32 const rk[], u32 rounds)
{
	int enc_after = (macp + abytes) % AES_BLOCK_SIZE;
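	/*
	 * A nonzero enc_after means the data ends in a partial block: the core
	 * asm then encrypts the running MAC after its last full block so that
	 * the tail bytes can be folded into a fresh block.
	 */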

	do {
		u32 blocks = abytes / AES_BLOCK_SIZE;

		if (macp == AES_BLOCK_SIZE || (!macp && blocks > 0)) {
			u32 rem = ce_aes_mac_update(in, rk, rounds, blocks,
						    mac, macp, enc_after);
			u32 adv = (blocks - rem) * AES_BLOCK_SIZE;

			macp = enc_after ? 0 : AES_BLOCK_SIZE;
			in += adv;
			abytes -= adv;

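			/*
			 * A nonzero residue means the asm code yielded the
			 * NEON unit early; give preemption a chance and then
			 * resume with the unprocessed blocks.
			 */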
			if (unlikely(rem)) {
				kernel_neon_end();
				kernel_neon_begin();
				macp = 0;
			}
		} else {
			u32 l = min(AES_BLOCK_SIZE - macp, abytes);

			crypto_xor(&mac[macp], in, l);
			in += l;
			macp += l;
			abytes -= l;
		}
	} while (abytes > 0);

	return macp;
}

static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct __packed { __be16 l; __be32 h; u16 len; } ltag;
	struct scatter_walk walk;
	u32 len = req->assoclen;
	u32 macp = AES_BLOCK_SIZE;
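	/*
	 * mac[] holds the still unencrypted B0 block; starting with macp at
	 * a full block forces it through the cipher before any AAD is xored
	 * in.
	 */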

	/*
	 * Prepend the AAD with a length tag: sizes below 0xff00 are encoded
	 * in two bytes, larger ones as the marker 0xfffe followed by the
	 * 32-bit big-endian length (RFC 3610, section 2.2).
	 */
	if (len < 0xff00) {
		ltag.l = cpu_to_be16(len);
		ltag.len = 2;
	} else {
		ltag.l = cpu_to_be16(0xfffe);
		put_unaligned_be32(len, &ltag.h);
		ltag.len = 6;
	}

	macp = ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, macp,
				    ctx->key_enc, num_rounds(ctx));
	scatterwalk_start(&walk, req->src);

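	/* feed the AAD scatterlist into the CBC-MAC one mapped chunk at a time */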
	do {
		u32 n = scatterwalk_clamp(&walk, len);
		u8 *p;

		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		p = scatterwalk_map(&walk);

		macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc,
					    num_rounds(ctx));

		len -= n;

		scatterwalk_unmap(p);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
	} while (len);
}

static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 orig_iv[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	/* preserve the original iv for the final round */
	memcpy(orig_iv, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);
	if (unlikely(err))
		return err;

	kernel_neon_begin();

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	do {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		u8 buf[AES_BLOCK_SIZE];
		u8 *final_iv = NULL;

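		/*
		 * On the final chunk, pass the preserved counter 0 IV so the
		 * core transform can also encrypt the final MAC.
		 */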
		if (walk.nbytes == walk.total) {
			tail = 0;
			final_iv = orig_iv;
		}

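		/*
		 * The asm code handles a short final block with overlapping
		 * block sized loads and stores, so bounce inputs smaller than
		 * one block via the end of a block sized stack buffer.
		 */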
		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
			src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
					   src, walk.nbytes);

		ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
				   ctx->key_enc, num_rounds(ctx),
				   mac, walk.iv, final_iv);

		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
			memcpy(walk.dst.virt.addr, dst, walk.nbytes);

		if (walk.nbytes)
			err = skcipher_walk_done(&walk, tail);
	} while (walk.nbytes);

	kernel_neon_end();

	if (unlikely(err))
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}

static int ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 orig_iv[AES_BLOCK_SIZE];
	u32 len = req->cryptlen - authsize;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	/* preserve the original iv for the final round */
	memcpy(orig_iv, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_decrypt(&walk, req, false);
	if (unlikely(err))
		return err;

	kernel_neon_begin();

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	do {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		u8 buf[AES_BLOCK_SIZE];
		u8 *final_iv = NULL;

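		/*
		 * On the final chunk, pass the preserved counter 0 IV so the
		 * core transform can also encrypt the final MAC.
		 */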
		if (walk.nbytes == walk.total) {
			tail = 0;
			final_iv = orig_iv;
		}

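		/*
		 * The asm code handles a short final block with overlapping
		 * block sized loads and stores, so bounce inputs smaller than
		 * one block via the end of a block sized stack buffer.
		 */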
		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
			src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
					   src, walk.nbytes);

		ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
				   ctx->key_enc, num_rounds(ctx),
				   mac, walk.iv, final_iv);

		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
			memcpy(walk.dst.virt.addr, dst, walk.nbytes);

		if (walk.nbytes)
			err = skcipher_walk_done(&walk, tail);
	} while (walk.nbytes);

	kernel_neon_end();

	if (unlikely(err))
		return err;

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(orig_iv, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

	if (crypto_memneq(mac, orig_iv, authsize))
		return -EBADMSG;
	return 0;
}

static struct aead_alg ccm_aes_alg = {
	.base = {
		.cra_name		= "ccm(aes)",
		.cra_driver_name	= "ccm-aes-ce",
		.cra_priority		= 300,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= ccm_setkey,
	.setauthsize	= ccm_setauthsize,
	.encrypt	= ccm_encrypt,
	.decrypt	= ccm_decrypt,
};

static int __init aes_mod_init(void)
{
	if (!cpu_have_named_feature(AES))
		return -ENODEV;
	return crypto_register_aead(&ccm_aes_alg);
}

static void __exit aes_mod_exit(void)
{
	crypto_unregister_aead(&ccm_aes_alg);
}

module_init(aes_mod_init);
module_exit(aes_mod_exit);

MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");
352 | |