// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
 *
 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

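/*
 * Tag generation/verification is handled as part of the CCP command
 * itself, so there is nothing left to do at completion time; just pass
 * the status through to the caller.
 */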
static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
{
	return ret;
}

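/*
 * Cache the key and its length in the transform context; a scatterlist
 * pointing at the cached copy is handed to the CCP with each request.
 */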
static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		return -EINVAL;
	}

	ctx->u.aes.mode = CCP_AES_MODE_GCM;
	ctx->u.aes.key_len = key_len;

	memcpy(ctx->u.aes.key, key, key_len);
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return 0;
}

static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
				   unsigned int authsize)
{
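	/* GCM allows tag lengths of 16 down to 12, 8 or 4 bytes (NIST SP 800-38D) */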
	switch (authsize) {
	case 16:
	case 15:
	case 14:
	case 13:
	case 12:
	case 8:
	case 4:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

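/*
 * Build a CCP command describing the GCM operation and queue it for the
 * hardware. The AAD and text are consumed from the source scatterlist;
 * the result, with the tag appended on encryption, is written to the
 * destination scatterlist.
 */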
static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct ccp_aes_req_ctx *rctx = aead_request_ctx_dma(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;
	int i;
	int ret = 0;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
		return -EINVAL;

	if (!req->iv)
		return -EINVAL;

	/*
	 * 5 parts:
	 *   plaintext/ciphertext input
	 *   AAD
	 *   key
	 *   IV
	 *   Destination+tag buffer
	 */

	/* Prepare the IV: the 12-byte nonce followed by a 32-bit
	 * big-endian block counter initialized to 1, per the GCM spec
	 */
	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	for (i = 0; i < 3; i++)
		rctx->iv[i + GCM_AES_IV_SIZE] = 0;
	rctx->iv[AES_BLOCK_SIZE - 1] = 1;

	/* Set up a scatterlist for the IV */
	iv_sg = &rctx->iv_sg;
	iv_len = AES_BLOCK_SIZE;
	sg_init_one(iv_sg, rctx->iv, iv_len);

	/* The AAD + plaintext are concatenated in the src buffer */
	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action = encrypt;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = iv_sg;
	rctx->cmd.u.aes.iv_len = iv_len;
	rctx->cmd.u.aes.src = req->src;
	rctx->cmd.u.aes.src_len = req->cryptlen;
	rctx->cmd.u.aes.aad_len = req->assoclen;

	/* The cipher text + the tag are in the dst buffer */
	rctx->cmd.u.aes.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_gcm_encrypt(struct aead_request *req)
{
	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT);
}

static int ccp_aes_gcm_decrypt(struct aead_request *req)
{
	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT);
}

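/*
 * Per-transform setup: install the completion callback, mark the key as
 * not yet set, and size the request context (with DMA padding) so that
 * each request has room for a struct ccp_aes_req_ctx.
 */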
static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
{
	struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm);

	ctx->complete = ccp_aes_gcm_complete;
	ctx->u.aes.key_len = 0;

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm)
{
}

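/*
 * Template shared by the AEAD algorithms registered below; the name,
 * driver name and block size are filled in per algorithm at
 * registration time.
 */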
static struct aead_alg ccp_aes_gcm_defaults = {
	.setkey = ccp_aes_gcm_setkey,
	.setauthsize = ccp_aes_gcm_setauthsize,
	.encrypt = ccp_aes_gcm_encrypt,
	.decrypt = ccp_aes_gcm_decrypt,
	.init = ccp_aes_gcm_cra_init,
	.ivsize = GCM_AES_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.base = {
		.cra_flags	= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_ALLOCATES_MEMORY |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority	= CCP_CRA_PRIORITY,
		.cra_exit	= ccp_aes_gcm_cra_exit,
		.cra_module	= THIS_MODULE,
	},
};

struct ccp_aes_aead_def {
	enum ccp_aes_mode mode;
	unsigned int version;
	const char *name;
	const char *driver_name;
	unsigned int blocksize;
	unsigned int ivsize;
	struct aead_alg *alg_defaults;
};

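/* AES GCM is only supported by version 5.0 and later CCPs */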
static struct ccp_aes_aead_def aes_aead_algs[] = {
	{
		.mode		= CCP_AES_MODE_GHASH,
		.version	= CCP_VERSION(5, 0),
		.name		= "gcm(aes)",
		.driver_name	= "gcm-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_gcm_defaults,
	},
};

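/*
 * Allocate a wrapper for the algorithm, fill in the aead_alg from the
 * defaults template and the per-algorithm definition, register it with
 * the crypto API, and link it onto the driver's list so it can be
 * unregistered later.
 */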
static int ccp_register_aes_aead(struct list_head *head,
				 const struct ccp_aes_aead_def *def)
{
	struct ccp_crypto_aead *ccp_aead;
	struct aead_alg *alg;
	int ret;

	ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL);
	if (!ccp_aead)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_aead->entry);

	ccp_aead->mode = def->mode;

	/* Copy the defaults and override as necessary */
	alg = &ccp_aead->alg;
	*alg = *def->alg_defaults;
	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->driver_name);
	alg->base.cra_blocksize = def->blocksize;

	ret = crypto_register_aead(alg);
	if (ret) {
		pr_err("%s aead algorithm registration error (%d)\n",
		       alg->base.cra_name, ret);
		kfree(ccp_aead);
		return ret;
	}

	list_add(&ccp_aead->entry, head);

	return 0;
}

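/*
 * Register every AES AEAD algorithm the installed CCP supports, adding
 * each one to @head for the caller to unregister later.
 */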
int ccp_register_aes_aeads(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) {
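		/* Skip algorithms that need a newer CCP than is present */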
		if (aes_aead_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_aes_aead(head, &aes_aead_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}